Diffstat (limited to 'src/mongo/s')
-rw-r--r--  src/mongo/s/async_requests_sender.cpp  31
-rw-r--r--  src/mongo/s/async_requests_sender.h  15
-rw-r--r--  src/mongo/s/balancer_configuration.cpp  30
-rw-r--r--  src/mongo/s/balancer_configuration.h  10
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog.h  23
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_impl.cpp  53
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_impl.h  27
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_mock.cpp  23
-rw-r--r--  src/mongo/s/catalog/dist_lock_catalog_mock.h  23
-rw-r--r--  src/mongo/s/catalog/dist_lock_manager.cpp  20
-rw-r--r--  src/mongo/s/catalog/dist_lock_manager.h  20
-rw-r--r--  src/mongo/s/catalog/dist_lock_manager_mock.cpp  14
-rw-r--r--  src/mongo/s/catalog/dist_lock_manager_mock.h  16
-rw-r--r--  src/mongo/s/catalog/replset_dist_lock_manager.cpp  52
-rw-r--r--  src/mongo/s/catalog/replset_dist_lock_manager.h  16
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client.h  54
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.cpp  287
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_impl.h  74
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.cpp  59
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_mock.h  54
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp  16
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager.h  24
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp  66
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_impl.cpp  50
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_impl.h  36
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp  75
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp  60
-rw-r--r--  src/mongo/s/catalog_cache.cpp  6
-rw-r--r--  src/mongo/s/catalog_cache.h  2
-rw-r--r--  src/mongo/s/chunk_diff.cpp  6
-rw-r--r--  src/mongo/s/chunk_diff.h  6
-rw-r--r--  src/mongo/s/chunk_diff_test.cpp  6
-rw-r--r--  src/mongo/s/chunk_manager.cpp  27
-rw-r--r--  src/mongo/s/chunk_manager.h  6
-rw-r--r--  src/mongo/s/client/parallel.cpp  52
-rw-r--r--  src/mongo/s/client/parallel.h  14
-rw-r--r--  src/mongo/s/client/shard.cpp  28
-rw-r--r--  src/mongo/s/client/shard.h  18
-rw-r--r--  src/mongo/s/client/shard_connection.cpp  18
-rw-r--r--  src/mongo/s/client/shard_connection.h  2
-rw-r--r--  src/mongo/s/client/shard_local.cpp  30
-rw-r--r--  src/mongo/s/client/shard_local.h  8
-rw-r--r--  src/mongo/s/client/shard_local_test.cpp  20
-rw-r--r--  src/mongo/s/client/shard_registry.cpp  44
-rw-r--r--  src/mongo/s/client/shard_registry.h  8
-rw-r--r--  src/mongo/s/client/shard_remote.cpp  38
-rw-r--r--  src/mongo/s/client/shard_remote.h  9
-rw-r--r--  src/mongo/s/client/sharding_connection_hook.cpp  4
-rw-r--r--  src/mongo/s/client/version_manager.cpp  34
-rw-r--r--  src/mongo/s/cluster_identity_loader.cpp  10
-rw-r--r--  src/mongo/s/cluster_identity_loader.h  4
-rw-r--r--  src/mongo/s/commands/chunk_manager_targeter.cpp  57
-rw-r--r--  src/mongo/s/commands/chunk_manager_targeter.h  18
-rw-r--r--  src/mongo/s/commands/cluster_add_shard_cmd.cpp  10
-rw-r--r--  src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp  6
-rw-r--r--  src/mongo/s/commands/cluster_aggregate.cpp  46
-rw-r--r--  src/mongo/s/commands/cluster_aggregate.h  6
-rw-r--r--  src/mongo/s/commands/cluster_apply_ops_cmd.cpp  6
-rw-r--r--  src/mongo/s/commands/cluster_available_query_options_cmd.cpp  2
-rw-r--r--  src/mongo/s/commands/cluster_commands_common.cpp  18
-rw-r--r--  src/mongo/s/commands/cluster_commands_common.h  4
-rw-r--r--  src/mongo/s/commands/cluster_control_balancer_cmd.cpp  6
-rw-r--r--  src/mongo/s/commands/cluster_count_cmd.cpp  14
-rw-r--r--  src/mongo/s/commands/cluster_drop_cmd.cpp  22
-rw-r--r--  src/mongo/s/commands/cluster_drop_database_cmd.cpp  41
-rw-r--r--  src/mongo/s/commands/cluster_enable_sharding_cmd.cpp  6
-rw-r--r--  src/mongo/s/commands/cluster_explain.cpp  10
-rw-r--r--  src/mongo/s/commands/cluster_explain.h  4
-rw-r--r--  src/mongo/s/commands/cluster_explain_cmd.cpp  8
-rw-r--r--  src/mongo/s/commands/cluster_find_and_modify_cmd.cpp  46
-rw-r--r--  src/mongo/s/commands/cluster_find_cmd.cpp  16
-rw-r--r--  src/mongo/s/commands/cluster_flush_router_config_cmd.cpp  4
-rw-r--r--  src/mongo/s/commands/cluster_fsync_cmd.cpp  6
-rw-r--r--  src/mongo/s/commands/cluster_ftdc_commands.cpp  2
-rw-r--r--  src/mongo/s/commands/cluster_get_last_error_cmd.cpp  17
-rw-r--r--  src/mongo/s/commands/cluster_get_prev_error_cmd.cpp  2
-rw-r--r--  src/mongo/s/commands/cluster_get_shard_map_cmd.cpp  2
-rw-r--r--  src/mongo/s/commands/cluster_get_shard_version_cmd.cpp  6
-rw-r--r--  src/mongo/s/commands/cluster_getmore_cmd.cpp  4
-rw-r--r--  src/mongo/s/commands/cluster_index_filter_cmd.cpp  4
-rw-r--r--  src/mongo/s/commands/cluster_is_db_grid_cmd.cpp  2
-rw-r--r--  src/mongo/s/commands/cluster_is_master_cmd.cpp  12
-rw-r--r--  src/mongo/s/commands/cluster_kill_op.cpp  4
-rw-r--r--  src/mongo/s/commands/cluster_killcursors_cmd.cpp  4
-rw-r--r--  src/mongo/s/commands/cluster_list_databases_cmd.cpp  10
-rw-r--r--  src/mongo/s/commands/cluster_list_shards_cmd.cpp  6
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_cmd.cpp  62
-rw-r--r--  src/mongo/s/commands/cluster_merge_chunks_cmd.cpp  8
-rw-r--r--  src/mongo/s/commands/cluster_move_chunk_cmd.cpp  15
-rw-r--r--  src/mongo/s/commands/cluster_move_primary_cmd.cpp  32
-rw-r--r--  src/mongo/s/commands/cluster_netstat_cmd.cpp  2
-rw-r--r--  src/mongo/s/commands/cluster_pipeline_cmd.cpp  4
-rw-r--r--  src/mongo/s/commands/cluster_plan_cache_cmd.cpp  6
-rw-r--r--  src/mongo/s/commands/cluster_profile_cmd.cpp  2
-rw-r--r--  src/mongo/s/commands/cluster_remove_shard_cmd.cpp  12
-rw-r--r--  src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp  6
-rw-r--r--  src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp  2
-rw-r--r--  src/mongo/s/commands/cluster_reset_error_cmd.cpp  2
-rw-r--r--  src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp  6
-rw-r--r--  src/mongo/s/commands/cluster_shard_collection_cmd.cpp  36
-rw-r--r--  src/mongo/s/commands/cluster_shutdown_cmd.cpp  2
-rw-r--r--  src/mongo/s/commands/cluster_split_cmd.cpp  18
-rw-r--r--  src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp  6
-rw-r--r--  src/mongo/s/commands/cluster_user_management_commands.cpp  122
-rw-r--r--  src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp  2
-rw-r--r--  src/mongo/s/commands/cluster_write.cpp  53
-rw-r--r--  src/mongo/s/commands/cluster_write.h  4
-rw-r--r--  src/mongo/s/commands/cluster_write_cmd.cpp  35
-rw-r--r--  src/mongo/s/commands/commands_public.cpp  194
-rw-r--r--  src/mongo/s/commands/run_on_all_shards_cmd.cpp  12
-rw-r--r--  src/mongo/s/commands/run_on_all_shards_cmd.h  4
-rw-r--r--  src/mongo/s/commands/strategy.cpp  82
-rw-r--r--  src/mongo/s/commands/strategy.h  16
-rw-r--r--  src/mongo/s/config.cpp  36
-rw-r--r--  src/mongo/s/config.h  8
-rw-r--r--  src/mongo/s/config_server_client.cpp  12
-rw-r--r--  src/mongo/s/config_server_client.h  4
-rw-r--r--  src/mongo/s/config_server_test_fixture.cpp  20
-rw-r--r--  src/mongo/s/config_server_test_fixture.h  10
-rw-r--r--  src/mongo/s/grid.h  2
-rw-r--r--  src/mongo/s/local_sharding_info.cpp  6
-rw-r--r--  src/mongo/s/local_sharding_info.h  2
-rw-r--r--  src/mongo/s/ns_targeter.h  8
-rw-r--r--  src/mongo/s/query/async_results_merger.cpp  24
-rw-r--r--  src/mongo/s/query/async_results_merger.h  10
-rw-r--r--  src/mongo/s/query/cluster_client_cursor.h  4
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_impl.cpp  18
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_impl.h  10
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_mock.cpp  4
-rw-r--r--  src/mongo/s/query/cluster_client_cursor_mock.h  4
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.cpp  10
-rw-r--r--  src/mongo/s/query/cluster_cursor_manager.h  6
-rw-r--r--  src/mongo/s/query/cluster_find.cpp  41
-rw-r--r--  src/mongo/s/query/cluster_find.h  4
-rw-r--r--  src/mongo/s/query/router_exec_stage.h  4
-rw-r--r--  src/mongo/s/query/router_stage_limit.cpp  8
-rw-r--r--  src/mongo/s/query/router_stage_limit.h  4
-rw-r--r--  src/mongo/s/query/router_stage_merge.cpp  8
-rw-r--r--  src/mongo/s/query/router_stage_merge.h  4
-rw-r--r--  src/mongo/s/query/router_stage_mock.cpp  4
-rw-r--r--  src/mongo/s/query/router_stage_mock.h  4
-rw-r--r--  src/mongo/s/query/router_stage_remove_sortkey.cpp  8
-rw-r--r--  src/mongo/s/query/router_stage_remove_sortkey.h  4
-rw-r--r--  src/mongo/s/query/router_stage_skip.cpp  10
-rw-r--r--  src/mongo/s/query/router_stage_skip.h  4
-rw-r--r--  src/mongo/s/query/store_possible_cursor.cpp  6
-rw-r--r--  src/mongo/s/query/store_possible_cursor.h  2
-rw-r--r--  src/mongo/s/s_sharding_server_status.cpp  6
-rw-r--r--  src/mongo/s/server.cpp  22
-rw-r--r--  src/mongo/s/service_entry_point_mongos.cpp  20
-rw-r--r--  src/mongo/s/shard_key_pattern.cpp  5
-rw-r--r--  src/mongo/s/shard_key_pattern.h  2
-rw-r--r--  src/mongo/s/shard_key_pattern_test.cpp  4
-rw-r--r--  src/mongo/s/shard_util.cpp  18
-rw-r--r--  src/mongo/s/shard_util.h  6
-rw-r--r--  src/mongo/s/sharding_egress_metadata_hook.cpp  10
-rw-r--r--  src/mongo/s/sharding_egress_metadata_hook.h  4
-rw-r--r--  src/mongo/s/sharding_initialization.cpp  16
-rw-r--r--  src/mongo/s/sharding_initialization.h  6
-rw-r--r--  src/mongo/s/sharding_raii.cpp  32
-rw-r--r--  src/mongo/s/sharding_raii.h  10
-rw-r--r--  src/mongo/s/sharding_uptime_reporter.cpp  17
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.cpp  10
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.h  2
-rw-r--r--  src/mongo/s/write_ops/batch_write_op.cpp  4
-rw-r--r--  src/mongo/s/write_ops/batch_write_op.h  2
-rw-r--r--  src/mongo/s/write_ops/batch_write_op_test.cpp  150
-rw-r--r--  src/mongo/s/write_ops/mock_ns_targeter.h  10
-rw-r--r--  src/mongo/s/write_ops/write_op.cpp  8
-rw-r--r--  src/mongo/s/write_ops/write_op.h  2
-rw-r--r--  src/mongo/s/write_ops/write_op_test.cpp  24
171 files changed, 1792 insertions, 1715 deletions
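Read as a whole, the hunks below appear to be a mechanical rename: every OperationContext* parameter (and the members and locals that mirror it) under src/mongo/s changes from "txn" to "opCtx", with lines re-wrapped where the longer name no longer fits the column limit. A minimal before/after sketch of the pattern, using the setBalancerMode() declaration from balancer_configuration.h below (illustrative only, not part of the commit):

// Before this change: the OperationContext parameter is conventionally named txn.
Status setBalancerMode(OperationContext* txn, BalancerSettingsType::BalancerMode mode);

// After this change: the same parameter is named opCtx; behavior is unchanged.
Status setBalancerMode(OperationContext* opCtx, BalancerSettingsType::BalancerMode mode);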
diff --git a/src/mongo/s/async_requests_sender.cpp b/src/mongo/s/async_requests_sender.cpp
index 3c3cd194f78..1ec65430f75 100644
--- a/src/mongo/s/async_requests_sender.cpp
+++ b/src/mongo/s/async_requests_sender.cpp
@@ -51,7 +51,7 @@ const int kMaxNumFailedHostRetryAttempts = 3;
} // namespace
-AsyncRequestsSender::AsyncRequestsSender(OperationContext* txn,
+AsyncRequestsSender::AsyncRequestsSender(OperationContext* opCtx,
executor::TaskExecutor* executor,
StringData db,
const std::vector<AsyncRequestsSender::Request>& requests,
@@ -73,7 +73,7 @@ AsyncRequestsSender::AsyncRequestsSender(OperationContext* txn,
_metadataObj = metadataBuilder.obj();
// Schedule the requests immediately.
- _scheduleRequestsIfNeeded(txn);
+ _scheduleRequestsIfNeeded(opCtx);
}
AsyncRequestsSender::~AsyncRequestsSender() {
@@ -81,7 +81,7 @@ AsyncRequestsSender::~AsyncRequestsSender() {
}
std::vector<AsyncRequestsSender::Response> AsyncRequestsSender::waitForResponses(
- OperationContext* txn) {
+ OperationContext* opCtx) {
invariant(!_remotes.empty());
// Until all remotes have received a response or error, keep scheduling retries and waiting on
@@ -91,7 +91,7 @@ std::vector<AsyncRequestsSender::Response> AsyncRequestsSender::waitForResponses
// Note: if we have been interrupt()'d or if some remote had a non-retriable error and
// allowPartialResults is false, no retries will be scheduled.
- _scheduleRequestsIfNeeded(txn);
+ _scheduleRequestsIfNeeded(opCtx);
}
// Construct the responses.
@@ -152,7 +152,7 @@ bool AsyncRequestsSender::_done_inlock() {
* 2. Remotes that already successfully received a response will have a non-empty 'response'.
* 3. Remotes that have reached maximum retries will have an error status.
*/
-void AsyncRequestsSender::_scheduleRequestsIfNeeded(OperationContext* txn) {
+void AsyncRequestsSender::_scheduleRequestsIfNeeded(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
// We can't make a new notification if there was a previous one that has not been signaled.
@@ -175,7 +175,7 @@ void AsyncRequestsSender::_scheduleRequestsIfNeeded(OperationContext* txn) {
// If we have not yet received a response or error for this remote, and we do not have an
// outstanding request for this remote, schedule remote work to send the command.
if (!remote.swResponse && !remote.cbHandle.isValid()) {
- auto scheduleStatus = _scheduleRequest_inlock(txn, i);
+ auto scheduleStatus = _scheduleRequest_inlock(opCtx, i);
if (!scheduleStatus.isOK()) {
// Being unable to schedule a request to a remote is a non-retriable error.
remote.swResponse = std::move(scheduleStatus);
@@ -191,7 +191,7 @@ void AsyncRequestsSender::_scheduleRequestsIfNeeded(OperationContext* txn) {
}
}
-Status AsyncRequestsSender::_scheduleRequest_inlock(OperationContext* txn, size_t remoteIndex) {
+Status AsyncRequestsSender::_scheduleRequest_inlock(OperationContext* opCtx, size_t remoteIndex) {
auto& remote = _remotes[remoteIndex];
invariant(!remote.cbHandle.isValid());
@@ -203,12 +203,15 @@ Status AsyncRequestsSender::_scheduleRequest_inlock(OperationContext* txn, size_
}
executor::RemoteCommandRequest request(
- remote.getTargetHost(), _db.toString(), remote.cmdObj, _metadataObj, txn);
-
- auto callbackStatus = _executor->scheduleRemoteCommand(
- request,
- stdx::bind(
- &AsyncRequestsSender::_handleResponse, this, stdx::placeholders::_1, txn, remoteIndex));
+ remote.getTargetHost(), _db.toString(), remote.cmdObj, _metadataObj, opCtx);
+
+ auto callbackStatus =
+ _executor->scheduleRemoteCommand(request,
+ stdx::bind(&AsyncRequestsSender::_handleResponse,
+ this,
+ stdx::placeholders::_1,
+ opCtx,
+ remoteIndex));
if (!callbackStatus.isOK()) {
return callbackStatus.getStatus();
}
@@ -219,7 +222,7 @@ Status AsyncRequestsSender::_scheduleRequest_inlock(OperationContext* txn, size_
void AsyncRequestsSender::_handleResponse(
const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData,
- OperationContext* txn,
+ OperationContext* opCtx,
size_t remoteIndex) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
diff --git a/src/mongo/s/async_requests_sender.h b/src/mongo/s/async_requests_sender.h
index daf48c558aa..9f8664f9c2a 100644
--- a/src/mongo/s/async_requests_sender.h
+++ b/src/mongo/s/async_requests_sender.h
@@ -55,8 +55,9 @@ namespace mongo {
*
* Typical usage is:
*
- * AsyncRequestsSender ars(txn, executor, db, requests, readPrefSetting); // schedule the requests
- * auto responses = ars.waitForResponses(txn); // wait for responses; retries on retriable erors
+ * AsyncRequestsSender ars(opCtx, executor, db, requests, readPrefSetting); // schedule the
+ * requests
+ * auto responses = ars.waitForResponses(opCtx); // wait for responses; retries on retriable erors
*
* Additionally, you can interrupt() (if you want waitForResponses() to wait for responses for
* outstanding requests but stop scheduling retries) or kill() (if you want to cancel outstanding
@@ -103,7 +104,7 @@ public:
* Constructs a new AsyncRequestsSender. The TaskExecutor* must remain valid for the lifetime of
* the ARS.
*/
- AsyncRequestsSender(OperationContext* txn,
+ AsyncRequestsSender(OperationContext* opCtx,
executor::TaskExecutor* executor,
StringData db,
const std::vector<AsyncRequestsSender::Request>& requests,
@@ -122,7 +123,7 @@ public:
*
* Must only be called once.
*/
- std::vector<Response> waitForResponses(OperationContext* txn);
+ std::vector<Response> waitForResponses(OperationContext* opCtx);
/**
* Stops the ARS from retrying requests. Causes waitForResponses() to wait until any outstanding
@@ -161,7 +162,7 @@ private:
*
* Invalid to call if there is an existing Notification and it has not yet been signaled.
*/
- void _scheduleRequestsIfNeeded(OperationContext* txn);
+ void _scheduleRequestsIfNeeded(OperationContext* opCtx);
/**
* Helper to schedule a command to a remote.
@@ -171,7 +172,7 @@ private:
*
* Returns success if the command to retrieve the next batch was scheduled successfully.
*/
- Status _scheduleRequest_inlock(OperationContext* txn, size_t remoteIndex);
+ Status _scheduleRequest_inlock(OperationContext* opCtx, size_t remoteIndex);
/**
* The callback for a remote command.
@@ -185,7 +186,7 @@ private:
* On a non-retriable error, if allowPartialResults is false, sets _stopRetrying to true.
*/
void _handleResponse(const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData,
- OperationContext* txn,
+ OperationContext* opCtx,
size_t remoteIndex);
/**
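The "Typical usage" comment above is the whole caller-side contract, and it is unchanged apart from the parameter name. Below is a minimal sketch of a helper that fans one command out to several remotes and gathers the responses; the helper's name and the assumption that the read-preference argument is a ReadPreferenceSetting are mine, and only the two AsyncRequestsSender calls come from the header above (illustrative only, not part of the commit):

// Hypothetical helper, written as it would appear inside namespace mongo.
std::vector<AsyncRequestsSender::Response> broadcastToRemotes(
    OperationContext* opCtx,
    executor::TaskExecutor* executor,
    StringData db,
    const std::vector<AsyncRequestsSender::Request>& requests,
    const ReadPreferenceSetting& readPref) {
    AsyncRequestsSender ars(opCtx, executor, db, requests, readPref);  // schedules the requests
    return ars.waitForResponses(opCtx);  // waits; retries retriable errors per remote
}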
diff --git a/src/mongo/s/balancer_configuration.cpp b/src/mongo/s/balancer_configuration.cpp
index 79098a2ec4a..b332444a389 100644
--- a/src/mongo/s/balancer_configuration.cpp
+++ b/src/mongo/s/balancer_configuration.cpp
@@ -77,10 +77,10 @@ BalancerSettingsType::BalancerMode BalancerConfiguration::getBalancerMode() cons
return _balancerSettings.getMode();
}
-Status BalancerConfiguration::setBalancerMode(OperationContext* txn,
+Status BalancerConfiguration::setBalancerMode(OperationContext* opCtx,
BalancerSettingsType::BalancerMode mode) {
- auto updateStatus = Grid::get(txn)->catalogClient(txn)->updateConfigDocument(
- txn,
+ auto updateStatus = Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+ opCtx,
kSettingsNamespace.ns(),
BSON("_id" << BalancerSettingsType::kKey),
BSON("$set" << BSON(kStopped << (mode == BalancerSettingsType::kOff) << kMode
@@ -88,7 +88,7 @@ Status BalancerConfiguration::setBalancerMode(OperationContext* txn,
true,
ShardingCatalogClient::kMajorityWriteConcern);
- Status refreshStatus = refreshAndCheck(txn);
+ Status refreshStatus = refreshAndCheck(opCtx);
if (!refreshStatus.isOK()) {
return refreshStatus;
}
@@ -131,9 +131,9 @@ bool BalancerConfiguration::waitForDelete() const {
return _balancerSettings.waitForDelete();
}
-Status BalancerConfiguration::refreshAndCheck(OperationContext* txn) {
+Status BalancerConfiguration::refreshAndCheck(OperationContext* opCtx) {
// Balancer configuration
- Status balancerSettingsStatus = _refreshBalancerSettings(txn);
+ Status balancerSettingsStatus = _refreshBalancerSettings(opCtx);
if (!balancerSettingsStatus.isOK()) {
return {balancerSettingsStatus.code(),
str::stream() << "Failed to refresh the balancer settings due to "
@@ -141,7 +141,7 @@ Status BalancerConfiguration::refreshAndCheck(OperationContext* txn) {
}
// Chunk size settings
- Status chunkSizeStatus = _refreshChunkSizeSettings(txn);
+ Status chunkSizeStatus = _refreshChunkSizeSettings(opCtx);
if (!chunkSizeStatus.isOK()) {
return {chunkSizeStatus.code(),
str::stream() << "Failed to refresh the chunk sizes settings due to "
@@ -149,7 +149,7 @@ Status BalancerConfiguration::refreshAndCheck(OperationContext* txn) {
}
// AutoSplit settings
- Status autoSplitStatus = _refreshAutoSplitSettings(txn);
+ Status autoSplitStatus = _refreshAutoSplitSettings(opCtx);
if (!autoSplitStatus.isOK()) {
return {autoSplitStatus.code(),
str::stream() << "Failed to refresh the autoSplit settings due to "
@@ -159,11 +159,11 @@ Status BalancerConfiguration::refreshAndCheck(OperationContext* txn) {
return Status::OK();
}
-Status BalancerConfiguration::_refreshBalancerSettings(OperationContext* txn) {
+Status BalancerConfiguration::_refreshBalancerSettings(OperationContext* opCtx) {
BalancerSettingsType settings = BalancerSettingsType::createDefault();
- auto settingsObjStatus =
- Grid::get(txn)->catalogClient(txn)->getGlobalSettings(txn, BalancerSettingsType::kKey);
+ auto settingsObjStatus = Grid::get(opCtx)->catalogClient(opCtx)->getGlobalSettings(
+ opCtx, BalancerSettingsType::kKey);
if (settingsObjStatus.isOK()) {
auto settingsStatus = BalancerSettingsType::fromBSON(settingsObjStatus.getValue());
if (!settingsStatus.isOK()) {
@@ -181,11 +181,11 @@ Status BalancerConfiguration::_refreshBalancerSettings(OperationContext* txn) {
return Status::OK();
}
-Status BalancerConfiguration::_refreshChunkSizeSettings(OperationContext* txn) {
+Status BalancerConfiguration::_refreshChunkSizeSettings(OperationContext* opCtx) {
ChunkSizeSettingsType settings = ChunkSizeSettingsType::createDefault();
auto settingsObjStatus =
- grid.catalogClient(txn)->getGlobalSettings(txn, ChunkSizeSettingsType::kKey);
+ grid.catalogClient(opCtx)->getGlobalSettings(opCtx, ChunkSizeSettingsType::kKey);
if (settingsObjStatus.isOK()) {
auto settingsStatus = ChunkSizeSettingsType::fromBSON(settingsObjStatus.getValue());
if (!settingsStatus.isOK()) {
@@ -207,11 +207,11 @@ Status BalancerConfiguration::_refreshChunkSizeSettings(OperationContext* txn) {
return Status::OK();
}
-Status BalancerConfiguration::_refreshAutoSplitSettings(OperationContext* txn) {
+Status BalancerConfiguration::_refreshAutoSplitSettings(OperationContext* opCtx) {
AutoSplitSettingsType settings = AutoSplitSettingsType::createDefault();
auto settingsObjStatus =
- grid.catalogClient(txn)->getGlobalSettings(txn, AutoSplitSettingsType::kKey);
+ grid.catalogClient(opCtx)->getGlobalSettings(opCtx, AutoSplitSettingsType::kKey);
if (settingsObjStatus.isOK()) {
auto settingsStatus = AutoSplitSettingsType::fromBSON(settingsObjStatus.getValue());
if (!settingsStatus.isOK()) {
diff --git a/src/mongo/s/balancer_configuration.h b/src/mongo/s/balancer_configuration.h
index 2f5370d162c..71a3832f537 100644
--- a/src/mongo/s/balancer_configuration.h
+++ b/src/mongo/s/balancer_configuration.h
@@ -214,7 +214,7 @@ public:
/**
* Synchronous method, which writes the balancer mode to the configuration data.
*/
- Status setBalancerMode(OperationContext* txn, BalancerSettingsType::BalancerMode mode);
+ Status setBalancerMode(OperationContext* opCtx, BalancerSettingsType::BalancerMode mode);
/**
* Returns whether balancing is allowed based on both the enabled state of the balancer and the
@@ -254,27 +254,27 @@ public:
* This method is thread-safe but it doesn't make sense to be called from more than one thread
* at a time.
*/
- Status refreshAndCheck(OperationContext* txn);
+ Status refreshAndCheck(OperationContext* opCtx);
private:
/**
* Reloads the balancer configuration from the settings document. Fails if the settings document
* cannot be read, in which case the values will remain unchanged.
*/
- Status _refreshBalancerSettings(OperationContext* txn);
+ Status _refreshBalancerSettings(OperationContext* opCtx);
/**
* Reloads the chunk sizes configuration from the settings document. Fails if the settings
* document cannot be read or if any setting contains invalid value, in which case the offending
* value will remain unchanged.
*/
- Status _refreshChunkSizeSettings(OperationContext* txn);
+ Status _refreshChunkSizeSettings(OperationContext* opCtx);
/**
* Reloads the autosplit configuration from the settings document. Fails if the settings
* document cannot be read.
*/
- Status _refreshAutoSplitSettings(OperationContext* txn);
+ Status _refreshAutoSplitSettings(OperationContext* opCtx);
// The latest read balancer settings and a mutex to protect its swaps
mutable stdx::mutex _balancerSettingsMutex;
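From the caller's side, the refresh path shown in balancer_configuration.cpp above keeps the same shape, with opCtx threaded through instead of txn. A sketch, assuming the caller already holds a BalancerConfiguration* (how it is obtained, e.g. through Grid, is outside this diff) and that the usual mongo logging header is available (illustrative only, not part of the commit):

// Hypothetical helper: reload the settings documents, then report the cached mode.
Status refreshBalancerSettings(OperationContext* opCtx, BalancerConfiguration* balancerConfig) {
    // Reloads the balancer, chunk-size, and autosplit settings from config.settings.
    Status refreshStatus = balancerConfig->refreshAndCheck(opCtx);
    if (!refreshStatus.isOK()) {
        return refreshStatus;
    }
    if (balancerConfig->getBalancerMode() == BalancerSettingsType::kOff) {
        log() << "balancer is currently disabled";
    }
    return Status::OK();
}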
diff --git a/src/mongo/s/catalog/dist_lock_catalog.h b/src/mongo/s/catalog/dist_lock_catalog.h
index 8bccfc6c8a7..5b2c392eb28 100644
--- a/src/mongo/s/catalog/dist_lock_catalog.h
+++ b/src/mongo/s/catalog/dist_lock_catalog.h
@@ -73,13 +73,13 @@ public:
* Returns the ping document of the specified processID.
* Common status errors include socket errors.
*/
- virtual StatusWith<LockpingsType> getPing(OperationContext* txn, StringData processID) = 0;
+ virtual StatusWith<LockpingsType> getPing(OperationContext* opCtx, StringData processID) = 0;
/**
* Updates the ping document. Creates a new entry if it does not exists.
* Common status errors include socket errors.
*/
- virtual Status ping(OperationContext* txn, StringData processID, Date_t ping) = 0;
+ virtual Status ping(OperationContext* opCtx, StringData processID, Date_t ping) = 0;
/**
* Attempts to update the owner of a lock identified by lockID to lockSessionID.
@@ -98,7 +98,7 @@ public:
* Common status errors include socket and duplicate key errors.
*/
virtual StatusWith<LocksType> grabLock(
- OperationContext* txn,
+ OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
StringData who,
@@ -122,7 +122,7 @@ public:
*
* Common status errors include socket errors.
*/
- virtual StatusWith<LocksType> overtakeLock(OperationContext* txn,
+ virtual StatusWith<LocksType> overtakeLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
const OID& currentHolderTS,
@@ -137,46 +137,47 @@ public:
* specified session (i.e., it is not owned at all or if it is owned by a different session).
* Otherwise, it returns an error status. Common errors include socket errors.
*/
- virtual Status unlock(OperationContext* txn, const OID& lockSessionID) = 0;
+ virtual Status unlock(OperationContext* opCtx, const OID& lockSessionID) = 0;
/**
* Same as unlock() above except that it unlocks the lock document that matches "lockSessionID"
* AND "name", rather than just "lockSessionID". This is necessary if multiple documents have
* been locked with the same lockSessionID.
*/
- virtual Status unlock(OperationContext* txn, const OID& lockSessionID, StringData name) = 0;
+ virtual Status unlock(OperationContext* opCtx, const OID& lockSessionID, StringData name) = 0;
/**
* Unlocks all distributed locks with the given owning process ID. Does not provide any
* indication as to how many locks were actually unlocked. So long as the update command runs
* successfully, returns OK, otherwise returns an error status.
*/
- virtual Status unlockAll(OperationContext* txn, const std::string& processID) = 0;
+ virtual Status unlockAll(OperationContext* opCtx, const std::string& processID) = 0;
/**
* Get some information from the config server primary.
* Common status errors include socket errors.
*/
- virtual StatusWith<ServerInfo> getServerInfo(OperationContext* txn) = 0;
+ virtual StatusWith<ServerInfo> getServerInfo(OperationContext* opCtx) = 0;
/**
* Returns the lock document.
* Returns LockNotFound if lock document doesn't exist.
* Common status errors include socket errors.
*/
- virtual StatusWith<LocksType> getLockByTS(OperationContext* txn, const OID& lockSessionID) = 0;
+ virtual StatusWith<LocksType> getLockByTS(OperationContext* opCtx,
+ const OID& lockSessionID) = 0;
/**
* Returns the lock document.
* Common status errors include socket errors.
*/
- virtual StatusWith<LocksType> getLockByName(OperationContext* txn, StringData name) = 0;
+ virtual StatusWith<LocksType> getLockByName(OperationContext* opCtx, StringData name) = 0;
/**
* Attempts to delete the ping document corresponding to the given processId.
* Common status errors include socket errors.
*/
- virtual Status stopPing(OperationContext* txn, StringData processId) = 0;
+ virtual Status stopPing(OperationContext* opCtx, StringData processId) = 0;
protected:
DistLockCatalog();
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
index 754db72ae6f..ee094b0baf9 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl.cpp
@@ -164,10 +164,10 @@ DistLockCatalogImpl::DistLockCatalogImpl(ShardRegistry* shardRegistry)
DistLockCatalogImpl::~DistLockCatalogImpl() = default;
-StatusWith<LockpingsType> DistLockCatalogImpl::getPing(OperationContext* txn,
+StatusWith<LockpingsType> DistLockCatalogImpl::getPing(OperationContext* opCtx,
StringData processID) {
auto findResult = _findOnConfig(
- txn, kReadPref, _lockPingNS, BSON(LockpingsType::process() << processID), BSONObj(), 1);
+ opCtx, kReadPref, _lockPingNS, BSON(LockpingsType::process() << processID), BSONObj(), 1);
if (!findResult.isOK()) {
return findResult.getStatus();
@@ -191,7 +191,7 @@ StatusWith<LockpingsType> DistLockCatalogImpl::getPing(OperationContext* txn,
return pingDocResult.getValue();
}
-Status DistLockCatalogImpl::ping(OperationContext* txn, StringData processID, Date_t ping) {
+Status DistLockCatalogImpl::ping(OperationContext* opCtx, StringData processID, Date_t ping) {
auto request =
FindAndModifyRequest::makeUpdate(_lockPingNS,
BSON(LockpingsType::process() << processID),
@@ -200,7 +200,7 @@ Status DistLockCatalogImpl::ping(OperationContext* txn, StringData processID, Da
request.setWriteConcern(kMajorityWriteConcern);
auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
request.toBSON(),
@@ -211,7 +211,7 @@ Status DistLockCatalogImpl::ping(OperationContext* txn, StringData processID, Da
return findAndModifyStatus.getStatus();
}
-StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* txn,
+StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
StringData who,
@@ -237,7 +237,7 @@ StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* txn,
request.setWriteConcern(writeConcern);
auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
request.toBSON(),
@@ -266,7 +266,7 @@ StatusWith<LocksType> DistLockCatalogImpl::grabLock(OperationContext* txn,
return locksTypeResult.getValue();
}
-StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* txn,
+StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
const OID& currentHolderTS,
@@ -294,7 +294,7 @@ StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* txn,
request.setWriteConcern(kMajorityWriteConcern);
auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
request.toBSON(),
@@ -317,16 +317,16 @@ StatusWith<LocksType> DistLockCatalogImpl::overtakeLock(OperationContext* txn,
return locksTypeResult.getValue();
}
-Status DistLockCatalogImpl::unlock(OperationContext* txn, const OID& lockSessionID) {
+Status DistLockCatalogImpl::unlock(OperationContext* opCtx, const OID& lockSessionID) {
FindAndModifyRequest request = FindAndModifyRequest::makeUpdate(
_locksNS,
BSON(LocksType::lockID(lockSessionID)),
BSON("$set" << BSON(LocksType::state(LocksType::UNLOCKED))));
request.setWriteConcern(kMajorityWriteConcern);
- return _unlock(txn, request);
+ return _unlock(opCtx, request);
}
-Status DistLockCatalogImpl::unlock(OperationContext* txn,
+Status DistLockCatalogImpl::unlock(OperationContext* opCtx,
const OID& lockSessionID,
StringData name) {
FindAndModifyRequest request = FindAndModifyRequest::makeUpdate(
@@ -334,12 +334,12 @@ Status DistLockCatalogImpl::unlock(OperationContext* txn,
BSON(LocksType::lockID(lockSessionID) << LocksType::name(name.toString())),
BSON("$set" << BSON(LocksType::state(LocksType::UNLOCKED))));
request.setWriteConcern(kMajorityWriteConcern);
- return _unlock(txn, request);
+ return _unlock(opCtx, request);
}
-Status DistLockCatalogImpl::_unlock(OperationContext* txn, const FindAndModifyRequest& request) {
+Status DistLockCatalogImpl::_unlock(OperationContext* opCtx, const FindAndModifyRequest& request) {
auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
request.toBSON(),
@@ -357,7 +357,7 @@ Status DistLockCatalogImpl::_unlock(OperationContext* txn, const FindAndModifyRe
return findAndModifyStatus.getStatus();
}
-Status DistLockCatalogImpl::unlockAll(OperationContext* txn, const std::string& processID) {
+Status DistLockCatalogImpl::unlockAll(OperationContext* opCtx, const std::string& processID) {
std::unique_ptr<BatchedUpdateDocument> updateDoc(new BatchedUpdateDocument());
updateDoc->setQuery(BSON(LocksType::process(processID)));
updateDoc->setUpdateExpr(BSON("$set" << BSON(LocksType::state(LocksType::UNLOCKED))));
@@ -374,7 +374,7 @@ Status DistLockCatalogImpl::unlockAll(OperationContext* txn, const std::string&
BSONObj cmdObj = request.toBSON();
auto response = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
cmdObj,
@@ -403,9 +403,10 @@ Status DistLockCatalogImpl::unlockAll(OperationContext* txn, const std::string&
return batchResponse.toStatus();
}
-StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogImpl::getServerInfo(OperationContext* txn) {
+StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogImpl::getServerInfo(
+ OperationContext* opCtx) {
auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
kReadPref,
"admin",
BSON("serverStatus" << 1),
@@ -438,10 +439,10 @@ StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogImpl::getServerInfo(Opera
return DistLockCatalog::ServerInfo(localTimeElem.date(), electionIdStatus.getValue());
}
-StatusWith<LocksType> DistLockCatalogImpl::getLockByTS(OperationContext* txn,
+StatusWith<LocksType> DistLockCatalogImpl::getLockByTS(OperationContext* opCtx,
const OID& lockSessionID) {
auto findResult = _findOnConfig(
- txn, kReadPref, _locksNS, BSON(LocksType::lockID(lockSessionID)), BSONObj(), 1);
+ opCtx, kReadPref, _locksNS, BSON(LocksType::lockID(lockSessionID)), BSONObj(), 1);
if (!findResult.isOK()) {
return findResult.getStatus();
@@ -465,9 +466,9 @@ StatusWith<LocksType> DistLockCatalogImpl::getLockByTS(OperationContext* txn,
return locksTypeResult.getValue();
}
-StatusWith<LocksType> DistLockCatalogImpl::getLockByName(OperationContext* txn, StringData name) {
+StatusWith<LocksType> DistLockCatalogImpl::getLockByName(OperationContext* opCtx, StringData name) {
auto findResult =
- _findOnConfig(txn, kReadPref, _locksNS, BSON(LocksType::name() << name), BSONObj(), 1);
+ _findOnConfig(opCtx, kReadPref, _locksNS, BSON(LocksType::name() << name), BSONObj(), 1);
if (!findResult.isOK()) {
return findResult.getStatus();
@@ -491,13 +492,13 @@ StatusWith<LocksType> DistLockCatalogImpl::getLockByName(OperationContext* txn,
return locksTypeResult.getValue();
}
-Status DistLockCatalogImpl::stopPing(OperationContext* txn, StringData processId) {
+Status DistLockCatalogImpl::stopPing(OperationContext* opCtx, StringData processId) {
auto request =
FindAndModifyRequest::makeRemove(_lockPingNS, BSON(LockpingsType::process() << processId));
request.setWriteConcern(kMajorityWriteConcern);
auto resultStatus = _client->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
_locksNS.db().toString(),
request.toBSON(),
@@ -509,14 +510,14 @@ Status DistLockCatalogImpl::stopPing(OperationContext* txn, StringData processId
}
StatusWith<vector<BSONObj>> DistLockCatalogImpl::_findOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const NamespaceString& nss,
const BSONObj& query,
const BSONObj& sort,
boost::optional<long long> limit) {
auto result = _client->getConfigShard()->exhaustiveFindOnConfig(
- txn, readPref, repl::ReadConcernLevel::kMajorityReadConcern, nss, query, sort, limit);
+ opCtx, readPref, repl::ReadConcernLevel::kMajorityReadConcern, nss, query, sort, limit);
if (!result.isOK()) {
return result.getStatus();
}
diff --git a/src/mongo/s/catalog/dist_lock_catalog_impl.h b/src/mongo/s/catalog/dist_lock_catalog_impl.h
index 6f26a3a1235..ab1c88bd250 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_impl.h
+++ b/src/mongo/s/catalog/dist_lock_catalog_impl.h
@@ -52,11 +52,12 @@ public:
virtual ~DistLockCatalogImpl();
- virtual StatusWith<LockpingsType> getPing(OperationContext* txn, StringData processID) override;
+ virtual StatusWith<LockpingsType> getPing(OperationContext* opCtx,
+ StringData processID) override;
- virtual Status ping(OperationContext* txn, StringData processID, Date_t ping) override;
+ virtual Status ping(OperationContext* opCtx, StringData processID, Date_t ping) override;
- virtual StatusWith<LocksType> grabLock(OperationContext* txn,
+ virtual StatusWith<LocksType> grabLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
StringData who,
@@ -65,7 +66,7 @@ public:
StringData why,
const WriteConcernOptions& writeConcern) override;
- virtual StatusWith<LocksType> overtakeLock(OperationContext* txn,
+ virtual StatusWith<LocksType> overtakeLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
const OID& currentHolderTS,
@@ -74,27 +75,27 @@ public:
Date_t time,
StringData why) override;
- virtual Status unlock(OperationContext* txn, const OID& lockSessionID) override;
+ virtual Status unlock(OperationContext* opCtx, const OID& lockSessionID) override;
- virtual Status unlock(OperationContext* txn,
+ virtual Status unlock(OperationContext* opCtx,
const OID& lockSessionID,
StringData name) override;
- virtual Status unlockAll(OperationContext* txn, const std::string& processID) override;
+ virtual Status unlockAll(OperationContext* opCtx, const std::string& processID) override;
- virtual StatusWith<ServerInfo> getServerInfo(OperationContext* txn) override;
+ virtual StatusWith<ServerInfo> getServerInfo(OperationContext* opCtx) override;
- virtual StatusWith<LocksType> getLockByTS(OperationContext* txn,
+ virtual StatusWith<LocksType> getLockByTS(OperationContext* opCtx,
const OID& lockSessionID) override;
- virtual StatusWith<LocksType> getLockByName(OperationContext* txn, StringData name) override;
+ virtual StatusWith<LocksType> getLockByName(OperationContext* opCtx, StringData name) override;
- virtual Status stopPing(OperationContext* txn, StringData processId) override;
+ virtual Status stopPing(OperationContext* opCtx, StringData processId) override;
private:
- Status _unlock(OperationContext* txn, const FindAndModifyRequest& request);
+ Status _unlock(OperationContext* opCtx, const FindAndModifyRequest& request);
- StatusWith<std::vector<BSONObj>> _findOnConfig(OperationContext* txn,
+ StatusWith<std::vector<BSONObj>> _findOnConfig(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const NamespaceString& nss,
const BSONObj& query,
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
index c3b27185eef..2a31d602e7a 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.cpp
@@ -144,7 +144,7 @@ DistLockCatalogMock::DistLockCatalogMock()
DistLockCatalogMock::~DistLockCatalogMock() {}
-StatusWith<LockpingsType> DistLockCatalogMock::getPing(OperationContext* txn,
+StatusWith<LockpingsType> DistLockCatalogMock::getPing(OperationContext* opCtx,
StringData processID) {
auto ret = kLockpingsTypeBadRetValue;
GetPingFunc checkerFunc = noGetPingSet;
@@ -159,7 +159,7 @@ StatusWith<LockpingsType> DistLockCatalogMock::getPing(OperationContext* txn,
return ret;
}
-Status DistLockCatalogMock::ping(OperationContext* txn, StringData processID, Date_t ping) {
+Status DistLockCatalogMock::ping(OperationContext* opCtx, StringData processID, Date_t ping) {
auto ret = kBadRetValue;
PingFunc checkerFunc = noPingFuncSet;
@@ -173,7 +173,7 @@ Status DistLockCatalogMock::ping(OperationContext* txn, StringData processID, Da
return ret;
}
-StatusWith<LocksType> DistLockCatalogMock::grabLock(OperationContext* txn,
+StatusWith<LocksType> DistLockCatalogMock::grabLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
StringData who,
@@ -194,7 +194,7 @@ StatusWith<LocksType> DistLockCatalogMock::grabLock(OperationContext* txn,
return ret;
}
-StatusWith<LocksType> DistLockCatalogMock::overtakeLock(OperationContext* txn,
+StatusWith<LocksType> DistLockCatalogMock::overtakeLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
const OID& currentHolderTS,
@@ -215,7 +215,7 @@ StatusWith<LocksType> DistLockCatalogMock::overtakeLock(OperationContext* txn,
return ret;
}
-Status DistLockCatalogMock::unlock(OperationContext* txn, const OID& lockSessionID) {
+Status DistLockCatalogMock::unlock(OperationContext* opCtx, const OID& lockSessionID) {
auto ret = kBadRetValue;
UnlockFunc checkerFunc = noUnLockFuncSet;
@@ -229,7 +229,7 @@ Status DistLockCatalogMock::unlock(OperationContext* txn, const OID& lockSession
return ret;
}
-Status DistLockCatalogMock::unlock(OperationContext* txn,
+Status DistLockCatalogMock::unlock(OperationContext* opCtx,
const OID& lockSessionID,
StringData name) {
auto ret = kBadRetValue;
@@ -246,7 +246,8 @@ Status DistLockCatalogMock::unlock(OperationContext* txn,
return ret;
}
-StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogMock::getServerInfo(OperationContext* txn) {
+StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogMock::getServerInfo(
+ OperationContext* opCtx) {
auto ret = kServerInfoBadRetValue;
GetServerInfoFunc checkerFunc = noGetServerInfoSet;
@@ -260,7 +261,7 @@ StatusWith<DistLockCatalog::ServerInfo> DistLockCatalogMock::getServerInfo(Opera
return ret;
}
-StatusWith<LocksType> DistLockCatalogMock::getLockByTS(OperationContext* txn,
+StatusWith<LocksType> DistLockCatalogMock::getLockByTS(OperationContext* opCtx,
const OID& lockSessionID) {
auto ret = kLocksTypeBadRetValue;
GetLockByTSFunc checkerFunc = noGetLockByTSSet;
@@ -275,7 +276,7 @@ StatusWith<LocksType> DistLockCatalogMock::getLockByTS(OperationContext* txn,
return ret;
}
-StatusWith<LocksType> DistLockCatalogMock::getLockByName(OperationContext* txn, StringData name) {
+StatusWith<LocksType> DistLockCatalogMock::getLockByName(OperationContext* opCtx, StringData name) {
auto ret = kLocksTypeBadRetValue;
GetLockByNameFunc checkerFunc = noGetLockByNameSet;
@@ -289,7 +290,7 @@ StatusWith<LocksType> DistLockCatalogMock::getLockByName(OperationContext* txn,
return ret;
}
-Status DistLockCatalogMock::stopPing(OperationContext* txn, StringData processId) {
+Status DistLockCatalogMock::stopPing(OperationContext* opCtx, StringData processId) {
auto ret = kBadRetValue;
StopPingFunc checkerFunc = noStopPingFuncSet;
@@ -370,7 +371,7 @@ void DistLockCatalogMock::expectGetServerInfo(GetServerInfoFunc checkerFunc,
_getServerInfoReturnValue = returnThis;
}
-Status DistLockCatalogMock::unlockAll(OperationContext* txn, const std::string& processID) {
+Status DistLockCatalogMock::unlockAll(OperationContext* opCtx, const std::string& processID) {
return Status(ErrorCodes::IllegalOperation,
str::stream() << "unlockAll not expected to be called; processID: " << processID);
}
diff --git a/src/mongo/s/catalog/dist_lock_catalog_mock.h b/src/mongo/s/catalog/dist_lock_catalog_mock.h
index e29b5aa5fc7..7ea0eaa91f2 100644
--- a/src/mongo/s/catalog/dist_lock_catalog_mock.h
+++ b/src/mongo/s/catalog/dist_lock_catalog_mock.h
@@ -89,11 +89,12 @@ public:
using GetLockByNameFunc = stdx::function<void(StringData name)>;
using GetServerInfoFunc = stdx::function<void()>;
- virtual StatusWith<LockpingsType> getPing(OperationContext* txn, StringData processID) override;
+ virtual StatusWith<LockpingsType> getPing(OperationContext* opCtx,
+ StringData processID) override;
- virtual Status ping(OperationContext* txn, StringData processID, Date_t ping) override;
+ virtual Status ping(OperationContext* opCtx, StringData processID, Date_t ping) override;
- virtual StatusWith<LocksType> grabLock(OperationContext* txn,
+ virtual StatusWith<LocksType> grabLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
StringData who,
@@ -102,7 +103,7 @@ public:
StringData why,
const WriteConcernOptions& writeConcern) override;
- virtual StatusWith<LocksType> overtakeLock(OperationContext* txn,
+ virtual StatusWith<LocksType> overtakeLock(OperationContext* opCtx,
StringData lockID,
const OID& lockSessionID,
const OID& currentHolderTS,
@@ -111,22 +112,22 @@ public:
Date_t time,
StringData why) override;
- virtual Status unlock(OperationContext* txn, const OID& lockSessionID) override;
+ virtual Status unlock(OperationContext* opCtx, const OID& lockSessionID) override;
- virtual Status unlock(OperationContext* txn,
+ virtual Status unlock(OperationContext* opCtx,
const OID& lockSessionID,
StringData name) override;
- virtual Status unlockAll(OperationContext* txn, const std::string& processID) override;
+ virtual Status unlockAll(OperationContext* opCtx, const std::string& processID) override;
- virtual StatusWith<ServerInfo> getServerInfo(OperationContext* txn) override;
+ virtual StatusWith<ServerInfo> getServerInfo(OperationContext* opCtx) override;
- virtual StatusWith<LocksType> getLockByTS(OperationContext* txn,
+ virtual StatusWith<LocksType> getLockByTS(OperationContext* opCtx,
const OID& lockSessionID) override;
- virtual StatusWith<LocksType> getLockByName(OperationContext* txn, StringData name) override;
+ virtual StatusWith<LocksType> getLockByName(OperationContext* opCtx, StringData name) override;
- virtual Status stopPing(OperationContext* txn, StringData processId) override;
+ virtual Status stopPing(OperationContext* opCtx, StringData processId) override;
/**
* Sets the checker method to use and the return value for grabLock to return every
diff --git a/src/mongo/s/catalog/dist_lock_manager.cpp b/src/mongo/s/catalog/dist_lock_manager.cpp
index 426d62cb7e9..9e6326ecf91 100644
--- a/src/mongo/s/catalog/dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/dist_lock_manager.cpp
@@ -39,19 +39,19 @@ namespace mongo {
const Seconds DistLockManager::kDefaultLockTimeout(20);
const Milliseconds DistLockManager::kSingleLockAttemptTimeout(0);
-DistLockManager::ScopedDistLock::ScopedDistLock(OperationContext* txn,
+DistLockManager::ScopedDistLock::ScopedDistLock(OperationContext* opCtx,
DistLockHandle lockHandle,
DistLockManager* lockManager)
- : _txn(txn), _lockID(std::move(lockHandle)), _lockManager(lockManager) {}
+ : _opCtx(opCtx), _lockID(std::move(lockHandle)), _lockManager(lockManager) {}
DistLockManager::ScopedDistLock::~ScopedDistLock() {
if (_lockManager) {
- _lockManager->unlock(_txn, _lockID);
+ _lockManager->unlock(_opCtx, _lockID);
}
}
DistLockManager::ScopedDistLock::ScopedDistLock(ScopedDistLock&& other)
- : _txn(nullptr), _lockManager(nullptr) {
+ : _opCtx(nullptr), _lockManager(nullptr) {
*this = std::move(other);
}
@@ -59,9 +59,9 @@ DistLockManager::ScopedDistLock& DistLockManager::ScopedDistLock::operator=(
ScopedDistLock&& other) {
if (this != &other) {
invariant(_lockManager == nullptr);
- invariant(_txn == nullptr);
+ invariant(_opCtx == nullptr);
- _txn = other._txn;
+ _opCtx = other._opCtx;
_lockID = std::move(other._lockID);
_lockManager = other._lockManager;
other._lockManager = nullptr;
@@ -70,16 +70,16 @@ DistLockManager::ScopedDistLock& DistLockManager::ScopedDistLock::operator=(
return *this;
}
-StatusWith<DistLockManager::ScopedDistLock> DistLockManager::lock(OperationContext* txn,
+StatusWith<DistLockManager::ScopedDistLock> DistLockManager::lock(OperationContext* opCtx,
StringData name,
StringData whyMessage,
Milliseconds waitFor) {
- auto distLockHandleStatus = lockWithSessionID(txn, name, whyMessage, OID::gen(), waitFor);
+ auto distLockHandleStatus = lockWithSessionID(opCtx, name, whyMessage, OID::gen(), waitFor);
if (!distLockHandleStatus.isOK()) {
return distLockHandleStatus.getStatus();
}
- return DistLockManager::ScopedDistLock(txn, std::move(distLockHandleStatus.getValue()), this);
+ return DistLockManager::ScopedDistLock(opCtx, std::move(distLockHandleStatus.getValue()), this);
}
Status DistLockManager::ScopedDistLock::checkStatus() {
@@ -87,7 +87,7 @@ Status DistLockManager::ScopedDistLock::checkStatus() {
return Status(ErrorCodes::IllegalOperation, "no lock manager, lock was not acquired");
}
- return _lockManager->checkStatus(_txn, _lockID);
+ return _lockManager->checkStatus(_opCtx, _lockID);
}
} // namespace mongo
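ScopedDistLock now stores the OperationContext as _opCtx and unlocks through it on destruction, so the RAII pattern for callers is unchanged apart from the parameter name. A sketch, assuming the caller can already reach a DistLockManager (typically via the sharding catalog client, which is outside this diff); the helper's name and the work placeholder are mine (illustrative only, not part of the commit):

// Hypothetical helper showing the acquire / verify / implicit-release flow.
Status doWorkUnderDistLock(OperationContext* opCtx,
                           DistLockManager* distLockManager,
                           StringData name) {
    // Acquire the named distributed lock, waiting up to the default 20-second timeout.
    auto scopedDistLock = distLockManager->lock(
        opCtx, name, "illustrative work", DistLockManager::kDefaultLockTimeout);
    if (!scopedDistLock.isOK()) {
        return scopedDistLock.getStatus();
    }

    // ... perform the work that requires the distributed lock ...

    // Optionally confirm the lock document is still held before a critical step.
    Status stillHeld = scopedDistLock.getValue().checkStatus();
    if (!stillHeld.isOK()) {
        return stillHeld;
    }

    return Status::OK();
    // ScopedDistLock's destructor calls unlock(_opCtx, _lockID) when it goes out of scope.
}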
diff --git a/src/mongo/s/catalog/dist_lock_manager.h b/src/mongo/s/catalog/dist_lock_manager.h
index 0512a5dc481..18bb6bce786 100644
--- a/src/mongo/s/catalog/dist_lock_manager.h
+++ b/src/mongo/s/catalog/dist_lock_manager.h
@@ -76,7 +76,7 @@ public:
MONGO_DISALLOW_COPYING(ScopedDistLock);
public:
- ScopedDistLock(OperationContext* txn,
+ ScopedDistLock(OperationContext* opCtx,
DistLockHandle lockHandle,
DistLockManager* lockManager);
~ScopedDistLock();
@@ -90,7 +90,7 @@ public:
Status checkStatus();
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
DistLockHandle _lockID;
DistLockManager* _lockManager; // Not owned here.
};
@@ -107,7 +107,7 @@ public:
* Cleanup the manager's resources. Implementations do not need to guarantee thread safety
* so callers should employ proper synchronization when calling this method.
*/
- virtual void shutDown(OperationContext* txn) = 0;
+ virtual void shutDown(OperationContext* opCtx) = 0;
/**
* Returns the process ID for this DistLockManager.
@@ -127,7 +127,7 @@ public:
* Returns ErrorCodes::DistributedClockSkewed when a clock skew is detected.
* Returns ErrorCodes::LockBusy if the lock is being held.
*/
- StatusWith<ScopedDistLock> lock(OperationContext* txn,
+ StatusWith<ScopedDistLock> lock(OperationContext* opCtx,
StringData name,
StringData whyMessage,
Milliseconds waitFor);
@@ -140,7 +140,7 @@ public:
* immediately reacquired if "lockSessionID" matches that of the lock, rather than waiting for
* the inactive lock to expire.
*/
- virtual StatusWith<DistLockHandle> lockWithSessionID(OperationContext* txn,
+ virtual StatusWith<DistLockHandle> lockWithSessionID(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID,
@@ -151,7 +151,7 @@ public:
* anyone. Uses local write concern and does not attempt to overtake the lock or check whether
* the lock lease has expired.
*/
- virtual StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* txn,
+ virtual StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID) = 0;
@@ -160,26 +160,26 @@ public:
* Unlocks the given lockHandle. Will attempt to retry again later if the config
* server is not reachable.
*/
- virtual void unlock(OperationContext* txn, const DistLockHandle& lockHandle) = 0;
+ virtual void unlock(OperationContext* opCtx, const DistLockHandle& lockHandle) = 0;
/**
* Unlocks the lock specified by "lockHandle" and "name". Will attempt to retry again later if
* the config server is not reachable.
*/
- virtual void unlock(OperationContext* txn,
+ virtual void unlock(OperationContext* opCtx,
const DistLockHandle& lockHandle,
StringData name) = 0;
/**
* Makes a best-effort attempt to unlock all locks owned by the given processID.
*/
- virtual void unlockAll(OperationContext* txn, const std::string& processID) = 0;
+ virtual void unlockAll(OperationContext* opCtx, const std::string& processID) = 0;
protected:
/**
* Checks if the lockHandle still exists in the config server.
*/
- virtual Status checkStatus(OperationContext* txn, const DistLockHandle& lockHandle) = 0;
+ virtual Status checkStatus(OperationContext* opCtx, const DistLockHandle& lockHandle) = 0;
};
} // namespace mongo
diff --git a/src/mongo/s/catalog/dist_lock_manager_mock.cpp b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
index 18bd8a8ba6c..710eca59cf6 100644
--- a/src/mongo/s/catalog/dist_lock_manager_mock.cpp
+++ b/src/mongo/s/catalog/dist_lock_manager_mock.cpp
@@ -59,7 +59,7 @@ DistLockManagerMock::DistLockManagerMock(std::unique_ptr<DistLockCatalog> catalo
void DistLockManagerMock::startUp() {}
-void DistLockManagerMock::shutDown(OperationContext* txn) {
+void DistLockManagerMock::shutDown(OperationContext* opCtx) {
uassert(28659, "DistLockManagerMock shut down with outstanding locks present", _locks.empty());
}
@@ -67,7 +67,7 @@ std::string DistLockManagerMock::getProcessID() {
return "Mock dist lock manager process id";
}
-StatusWith<DistLockHandle> DistLockManagerMock::lockWithSessionID(OperationContext* txn,
+StatusWith<DistLockHandle> DistLockManagerMock::lockWithSessionID(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID,
@@ -95,17 +95,17 @@ StatusWith<DistLockHandle> DistLockManagerMock::lockWithSessionID(OperationConte
}
StatusWith<DistLockHandle> DistLockManagerMock::tryLockWithLocalWriteConcern(
- OperationContext* txn, StringData name, StringData whyMessage, const OID& lockSessionID) {
+ OperationContext* opCtx, StringData name, StringData whyMessage, const OID& lockSessionID) {
// Not yet implemented
MONGO_UNREACHABLE;
}
-void DistLockManagerMock::unlockAll(OperationContext* txn, const std::string& processID) {
+void DistLockManagerMock::unlockAll(OperationContext* opCtx, const std::string& processID) {
// Not yet implemented
MONGO_UNREACHABLE;
}
-void DistLockManagerMock::unlock(OperationContext* txn, const DistLockHandle& lockHandle) {
+void DistLockManagerMock::unlock(OperationContext* opCtx, const DistLockHandle& lockHandle) {
std::vector<LockInfo>::iterator it =
std::find_if(_locks.begin(), _locks.end(), [&lockHandle](LockInfo info) -> bool {
return info.lockID == lockHandle;
@@ -116,7 +116,7 @@ void DistLockManagerMock::unlock(OperationContext* txn, const DistLockHandle& lo
_locks.erase(it);
}
-void DistLockManagerMock::unlock(OperationContext* txn,
+void DistLockManagerMock::unlock(OperationContext* opCtx,
const DistLockHandle& lockHandle,
StringData name) {
std::vector<LockInfo>::iterator it =
@@ -129,7 +129,7 @@ void DistLockManagerMock::unlock(OperationContext* txn,
_locks.erase(it);
}
-Status DistLockManagerMock::checkStatus(OperationContext* txn, const DistLockHandle& lockHandle) {
+Status DistLockManagerMock::checkStatus(OperationContext* opCtx, const DistLockHandle& lockHandle) {
return Status::OK();
}
diff --git a/src/mongo/s/catalog/dist_lock_manager_mock.h b/src/mongo/s/catalog/dist_lock_manager_mock.h
index d137b0239e4..17d91d94b19 100644
--- a/src/mongo/s/catalog/dist_lock_manager_mock.h
+++ b/src/mongo/s/catalog/dist_lock_manager_mock.h
@@ -44,22 +44,22 @@ public:
virtual ~DistLockManagerMock() = default;
void startUp() override;
- void shutDown(OperationContext* txn) override;
+ void shutDown(OperationContext* opCtx) override;
std::string getProcessID() override;
- StatusWith<DistLockHandle> lockWithSessionID(OperationContext* txn,
+ StatusWith<DistLockHandle> lockWithSessionID(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID,
Milliseconds waitFor) override;
- StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* txn,
+ StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID) override;
- void unlockAll(OperationContext* txn, const std::string& processID) override;
+ void unlockAll(OperationContext* opCtx, const std::string& processID) override;
using LockFunc =
stdx::function<void(StringData name, StringData whyMessage, Milliseconds waitFor)>;
@@ -67,11 +67,13 @@ public:
void expectLock(LockFunc checkerFunc, Status lockStatus);
protected:
- void unlock(OperationContext* txn, const DistLockHandle& lockHandle) override;
+ void unlock(OperationContext* opCtx, const DistLockHandle& lockHandle) override;
- void unlock(OperationContext* txn, const DistLockHandle& lockHandle, StringData name) override;
+ void unlock(OperationContext* opCtx,
+ const DistLockHandle& lockHandle,
+ StringData name) override;
- Status checkStatus(OperationContext* txn, const DistLockHandle& lockHandle) override;
+ Status checkStatus(OperationContext* opCtx, const DistLockHandle& lockHandle) override;
private:
struct LockInfo {
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.cpp b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
index eff613f66b6..98fbcf94ceb 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.cpp
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.cpp
@@ -89,7 +89,7 @@ void ReplSetDistLockManager::startUp() {
}
}
-void ReplSetDistLockManager::shutDown(OperationContext* txn) {
+void ReplSetDistLockManager::shutDown(OperationContext* opCtx) {
{
stdx::lock_guard<stdx::mutex> lk(_mutex);
_isShutDown = true;
@@ -103,7 +103,7 @@ void ReplSetDistLockManager::shutDown(OperationContext* txn) {
_execThread.reset();
}
- auto status = _catalog->stopPing(txn, _processID);
+ auto status = _catalog->stopPing(opCtx, _processID);
if (!status.isOK()) {
warning() << "error encountered while cleaning up distributed ping entry for " << _processID
<< causedBy(redact(status));
@@ -128,8 +128,8 @@ void ReplSetDistLockManager::doTask() {
while (!isShutDown()) {
{
- auto txn = cc().makeOperationContext();
- auto pingStatus = _catalog->ping(txn.get(), _processID, Date_t::now());
+ auto opCtx = cc().makeOperationContext();
+ auto pingStatus = _catalog->ping(opCtx.get(), _processID, Date_t::now());
if (!pingStatus.isOK() && pingStatus != ErrorCodes::NotMaster) {
warning() << "pinging failed for distributed lock pinger" << causedBy(pingStatus);
@@ -154,10 +154,10 @@ void ReplSetDistLockManager::doTask() {
"status unlock not initialized!");
if (toUnlock.second) {
// A non-empty _id (name) field was provided, unlock by ts (sessionId) and _id.
- unlockStatus = _catalog->unlock(txn.get(), toUnlock.first, *toUnlock.second);
+ unlockStatus = _catalog->unlock(opCtx.get(), toUnlock.first, *toUnlock.second);
nameMessage = " and " + LocksType::name() + ": " + *toUnlock.second;
} else {
- unlockStatus = _catalog->unlock(txn.get(), toUnlock.first);
+ unlockStatus = _catalog->unlock(opCtx.get(), toUnlock.first);
}
if (!unlockStatus.isOK()) {
@@ -180,11 +180,11 @@ void ReplSetDistLockManager::doTask() {
}
}
-StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* txn,
+StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* opCtx,
LocksType lockDoc,
const Milliseconds& lockExpiration) {
const auto& processID = lockDoc.getProcess();
- auto pingStatus = _catalog->getPing(txn, processID);
+ auto pingStatus = _catalog->getPing(opCtx, processID);
Date_t pingValue;
if (pingStatus.isOK()) {
@@ -202,7 +202,7 @@ StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* txn,
} // else use default pingValue if ping document does not exist.
Timer timer(_serviceContext->getTickSource());
- auto serverInfoStatus = _catalog->getServerInfo(txn);
+ auto serverInfoStatus = _catalog->getServerInfo(opCtx);
if (!serverInfoStatus.isOK()) {
if (serverInfoStatus.getStatus() == ErrorCodes::NotMaster) {
return false;
@@ -277,7 +277,7 @@ StatusWith<bool> ReplSetDistLockManager::isLockExpired(OperationContext* txn,
return false;
}
-StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationContext* txn,
+StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID,
@@ -290,7 +290,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
// independent write operations.
int networkErrorRetries = 0;
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
    // Distributed lock acquisition works by trying to update the state of the lock to 'taken'. If
    // the lock is currently taken, we will back off and try the acquisition again, repeating this
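A minimal sketch of the back-off loop described in the comment above, assuming the DistLockCatalog::grabLock() call that appears in this diff and a caller-supplied wait budget. The helper name, the placeholder "who"/"process" strings, and the 500ms back-off interval are illustrative only, not the actual implementation in this change.

    // Assumes the usual mongo headers: dist_lock_catalog.h, operation_context.h,
    // util/timer.h, util/time_support.h.
    StatusWith<DistLockHandle> acquireWithRetry(OperationContext* opCtx,
                                                DistLockCatalog* catalog,
                                                StringData name,
                                                const OID& lockSessionID,
                                                Milliseconds waitFor) {
        Timer timer;
        while (Milliseconds(timer.millis()) < waitFor) {
            auto result = catalog->grabLock(
                opCtx, name, lockSessionID, "who", "process", Date_t::now(), "why");
            if (result.getStatus() != ErrorCodes::LockStateChangeFailed) {
                // Either acquired, or failed for a reason other than "already taken".
                return result;
            }
            // The lock is held by someone else: back off briefly before retrying.
            sleepFor(Milliseconds(500));
        }
        return {ErrorCodes::LockBusy,
                str::stream() << "timed out waiting for distributed lock " << name};
    }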
@@ -312,7 +312,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
<< " with lockSessionID: " << lockSessionID << ", why: " << whyMessage.toString();
auto lockResult = _catalog->grabLock(
- txn, name, lockSessionID, who, _processID, Date_t::now(), whyMessage.toString());
+ opCtx, name, lockSessionID, who, _processID, Date_t::now(), whyMessage.toString());
auto status = lockResult.getStatus();
@@ -333,7 +333,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
networkErrorRetries++;
- status = _catalog->unlock(txn, lockSessionID, name);
+ status = _catalog->unlock(opCtx, lockSessionID, name);
if (status.isOK()) {
// We certainly do not own the lock, so we can retry
continue;
@@ -355,7 +355,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
}
// Get info from current lock and check if we can overtake it.
- auto getLockStatusResult = _catalog->getLockByName(txn, name);
+ auto getLockStatusResult = _catalog->getLockByName(opCtx, name);
const auto& getLockStatus = getLockStatusResult.getStatus();
if (!getLockStatusResult.isOK() && getLockStatus != ErrorCodes::LockNotFound) {
@@ -366,14 +366,14 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
// found, use the normal grab lock path to acquire it.
if (getLockStatusResult.isOK()) {
auto currentLock = getLockStatusResult.getValue();
- auto isLockExpiredResult = isLockExpired(txn, currentLock, lockExpiration);
+ auto isLockExpiredResult = isLockExpired(opCtx, currentLock, lockExpiration);
if (!isLockExpiredResult.isOK()) {
return isLockExpiredResult.getStatus();
}
if (isLockExpiredResult.getValue() || (lockSessionID == currentLock.getLockID())) {
- auto overtakeResult = _catalog->overtakeLock(txn,
+ auto overtakeResult = _catalog->overtakeLock(opCtx,
name,
lockSessionID,
currentLock.getLockID(),
@@ -429,7 +429,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::lockWithSessionID(OperationCo
}
StatusWith<DistLockHandle> ReplSetDistLockManager::tryLockWithLocalWriteConcern(
- OperationContext* txn, StringData name, StringData whyMessage, const OID& lockSessionID) {
+ OperationContext* opCtx, StringData name, StringData whyMessage, const OID& lockSessionID) {
const string who = str::stream() << _processID << ":" << getThreadName();
LOG(1) << "trying to acquire new distributed lock for " << name
@@ -438,7 +438,7 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::tryLockWithLocalWriteConcern(
<< " ms, process : " << _processID << " )"
<< " with lockSessionID: " << lockSessionID << ", why: " << whyMessage.toString();
- auto lockStatus = _catalog->grabLock(txn,
+ auto lockStatus = _catalog->grabLock(opCtx,
name,
lockSessionID,
who,
@@ -462,8 +462,8 @@ StatusWith<DistLockHandle> ReplSetDistLockManager::tryLockWithLocalWriteConcern(
return lockStatus.getStatus();
}
-void ReplSetDistLockManager::unlock(OperationContext* txn, const DistLockHandle& lockSessionID) {
- auto unlockStatus = _catalog->unlock(txn, lockSessionID);
+void ReplSetDistLockManager::unlock(OperationContext* opCtx, const DistLockHandle& lockSessionID) {
+ auto unlockStatus = _catalog->unlock(opCtx, lockSessionID);
if (!unlockStatus.isOK()) {
queueUnlock(lockSessionID, boost::none);
@@ -473,10 +473,10 @@ void ReplSetDistLockManager::unlock(OperationContext* txn, const DistLockHandle&
}
}
-void ReplSetDistLockManager::unlock(OperationContext* txn,
+void ReplSetDistLockManager::unlock(OperationContext* opCtx,
const DistLockHandle& lockSessionID,
StringData name) {
- auto unlockStatus = _catalog->unlock(txn, lockSessionID, name);
+ auto unlockStatus = _catalog->unlock(opCtx, lockSessionID, name);
if (!unlockStatus.isOK()) {
queueUnlock(lockSessionID, name.toString());
@@ -486,17 +486,17 @@ void ReplSetDistLockManager::unlock(OperationContext* txn,
}
}
-void ReplSetDistLockManager::unlockAll(OperationContext* txn, const std::string& processID) {
- Status status = _catalog->unlockAll(txn, processID);
+void ReplSetDistLockManager::unlockAll(OperationContext* opCtx, const std::string& processID) {
+ Status status = _catalog->unlockAll(opCtx, processID);
if (!status.isOK()) {
warning() << "Error while trying to unlock existing distributed locks"
<< causedBy(redact(status));
}
}
-Status ReplSetDistLockManager::checkStatus(OperationContext* txn,
+Status ReplSetDistLockManager::checkStatus(OperationContext* opCtx,
const DistLockHandle& lockHandle) {
- return _catalog->getLockByTS(txn, lockHandle).getStatus();
+ return _catalog->getLockByTS(opCtx, lockHandle).getStatus();
}
void ReplSetDistLockManager::queueUnlock(const DistLockHandle& lockSessionID,
diff --git a/src/mongo/s/catalog/replset_dist_lock_manager.h b/src/mongo/s/catalog/replset_dist_lock_manager.h
index d6db97ebc7a..278e6e4a78c 100644
--- a/src/mongo/s/catalog/replset_dist_lock_manager.h
+++ b/src/mongo/s/catalog/replset_dist_lock_manager.h
@@ -64,31 +64,31 @@ public:
virtual ~ReplSetDistLockManager();
void startUp() override;
- void shutDown(OperationContext* txn) override;
+ void shutDown(OperationContext* opCtx) override;
std::string getProcessID() override;
- StatusWith<DistLockHandle> lockWithSessionID(OperationContext* txn,
+ StatusWith<DistLockHandle> lockWithSessionID(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID,
Milliseconds waitFor) override;
- StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* txn,
+ StatusWith<DistLockHandle> tryLockWithLocalWriteConcern(OperationContext* opCtx,
StringData name,
StringData whyMessage,
const OID& lockSessionID) override;
- void unlock(OperationContext* txn, const DistLockHandle& lockSessionID) override;
+ void unlock(OperationContext* opCtx, const DistLockHandle& lockSessionID) override;
- void unlock(OperationContext* txn,
+ void unlock(OperationContext* opCtx,
const DistLockHandle& lockSessionID,
StringData name) override;
- void unlockAll(OperationContext* txn, const std::string& processID) override;
+ void unlockAll(OperationContext* opCtx, const std::string& processID) override;
protected:
- Status checkStatus(OperationContext* txn, const DistLockHandle& lockSessionID) override;
+ Status checkStatus(OperationContext* opCtx, const DistLockHandle& lockSessionID) override;
private:
/**
@@ -110,7 +110,7 @@ private:
     * Returns true if the process that currently owns the lock has not refreshed its ping
     * within the lock expiration threshold.
*/
- StatusWith<bool> isLockExpired(OperationContext* txn,
+ StatusWith<bool> isLockExpired(OperationContext* opCtx,
const LocksType lockDoc,
const Milliseconds& lockExpiration);
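The expiry test declared above boils down to a ping-freshness comparison. The sketch below assumes the owner's last ping and the config server's notion of "now" have already been fetched (for example via the DistLockCatalog::getPing() and getServerInfo() calls visible earlier in this diff) and omits the ping-progress bookkeeping the real implementation keeps across invocations; the function name is illustrative.

    // Returns true when the owner has gone a full expiration window without refreshing
    // its ping document, i.e. the lock is eligible to be overtaken.
    bool pingIsStale(Date_t lastPing, Date_t configServerNow, Milliseconds lockExpiration) {
        return (configServerNow - lastPing) >= lockExpiration;
    }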
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index f334c05a477..ee0b5a2384f 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -108,7 +108,7 @@ public:
/**
* Performs necessary cleanup when shutting down cleanly.
*/
- virtual void shutDown(OperationContext* txn) = 0;
+ virtual void shutDown(OperationContext* opCtx) = 0;
/**
* Creates a new database or updates the sharding status for an existing one. Cannot be
@@ -120,7 +120,7 @@ public:
* - DatabaseDifferCase - database already exists, but with a different case
* - ShardNotFound - could not find a shard to place the DB on
*/
- virtual Status enableSharding(OperationContext* txn, const std::string& dbName) = 0;
+ virtual Status enableSharding(OperationContext* opCtx, const std::string& dbName) = 0;
/**
* Shards a collection. Assumes that the database is enabled for sharding.
@@ -141,7 +141,7 @@ public:
* operations are writing to the same output collection, for instance.
*
*/
- virtual Status shardCollection(OperationContext* txn,
+ virtual Status shardCollection(OperationContext* opCtx,
const std::string& ns,
const ShardKeyPattern& fieldsAndOrder,
const BSONObj& defaultCollation,
@@ -157,13 +157,13 @@ public:
* Because of the asynchronous nature of the draining mechanism, this method returns
* the current draining status. See ShardDrainingStatus enum definition for more details.
*/
- virtual StatusWith<ShardDrainingStatus> removeShard(OperationContext* txn,
+ virtual StatusWith<ShardDrainingStatus> removeShard(OperationContext* opCtx,
const ShardId& name) = 0;
/**
* Updates or creates the metadata for a given database.
*/
- virtual Status updateDatabase(OperationContext* txn,
+ virtual Status updateDatabase(OperationContext* opCtx,
const std::string& dbName,
const DatabaseType& db) = 0;
@@ -177,13 +177,13 @@ public:
* the failure. These are some of the known failures:
* - NamespaceNotFound - database does not exist
*/
- virtual StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* txn,
+ virtual StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* opCtx,
const std::string& dbName) = 0;
/**
* Updates or creates the metadata for a given collection.
*/
- virtual Status updateCollection(OperationContext* txn,
+ virtual Status updateCollection(OperationContext* opCtx,
const std::string& collNs,
const CollectionType& coll) = 0;
@@ -198,7 +198,7 @@ public:
* - NamespaceNotFound - collection does not exist
*/
virtual StatusWith<repl::OpTimeWith<CollectionType>> getCollection(
- OperationContext* txn, const std::string& collNs) = 0;
+ OperationContext* opCtx, const std::string& collNs) = 0;
/**
     * Retrieves all collections under a specified database (or in the system).
@@ -212,7 +212,7 @@ public:
*
* Returns a !OK status if an error occurs.
*/
- virtual Status getCollections(OperationContext* txn,
+ virtual Status getCollections(OperationContext* opCtx,
const std::string* dbName,
std::vector<CollectionType>* collections,
repl::OpTime* optime) = 0;
@@ -224,14 +224,14 @@ public:
* some of the known failures:
* - NamespaceNotFound - collection does not exist
*/
- virtual Status dropCollection(OperationContext* txn, const NamespaceString& ns) = 0;
+ virtual Status dropCollection(OperationContext* opCtx, const NamespaceString& ns) = 0;
/**
* Retrieves all databases for a shard.
*
* Returns a !OK status if an error occurs.
*/
- virtual Status getDatabasesForShard(OperationContext* txn,
+ virtual Status getDatabasesForShard(OperationContext* opCtx,
const ShardId& shardId,
std::vector<std::string>* dbs) = 0;
@@ -249,7 +249,7 @@ public:
*
* Returns a !OK status if an error occurs.
*/
- virtual Status getChunks(OperationContext* txn,
+ virtual Status getChunks(OperationContext* opCtx,
const BSONObj& filter,
const BSONObj& sort,
boost::optional<int> limit,
@@ -260,7 +260,7 @@ public:
/**
* Retrieves all tags for the specified collection.
*/
- virtual Status getTagsForCollection(OperationContext* txn,
+ virtual Status getTagsForCollection(OperationContext* opCtx,
const std::string& collectionNs,
std::vector<TagsType>* tags) = 0;
@@ -269,7 +269,7 @@ public:
* Returns a !OK status if an error occurs.
*/
virtual StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
- OperationContext* txn, repl::ReadConcernLevel readConcern) = 0;
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) = 0;
/**
* Runs a user management command on the config servers, potentially synchronizing through
@@ -281,7 +281,7 @@ public:
* @param result: contains data returned from config servers
* Returns true on success.
*/
- virtual bool runUserManagementWriteCommand(OperationContext* txn,
+ virtual bool runUserManagementWriteCommand(OperationContext* opCtx,
const std::string& commandName,
const std::string& dbname,
const BSONObj& cmdObj,
@@ -290,7 +290,7 @@ public:
/**
* Runs a user management related read-only command on a config server.
*/
- virtual bool runUserManagementReadCommand(OperationContext* txn,
+ virtual bool runUserManagementReadCommand(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) = 0;
@@ -313,7 +313,7 @@ public:
* failure because the precondition no longer matches. If a query of the chunks collection
* returns a document matching both 'nss' and 'lastChunkVersion,' the write succeeded.
*/
- virtual Status applyChunkOpsDeprecated(OperationContext* txn,
+ virtual Status applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
const std::string& nss,
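The disambiguation path described in the comment above can be sketched against the getChunks() interface declared earlier in this header: when the applyOps response is lost, a single chunk document matching both the namespace and the expected lastChunkVersion proves the preconditioned write committed. This mirrors the check applyChunkOpsDeprecated() performs in sharding_catalog_client_impl.cpp later in this diff; the standalone helper name is illustrative.

    bool confirmChunkOpsApplied(OperationContext* opCtx,
                                ShardingCatalogClient* catalog,
                                const std::string& nss,
                                const ChunkVersion& lastChunkVersion) {
        // A chunk document carrying both the namespace and the expected version can only
        // exist if the preconditioned applyOps write actually went through.
        BSONObjBuilder query;
        lastChunkVersion.addToBSON(query, ChunkType::DEPRECATED_lastmod());
        query.append(ChunkType::ns(), nss);
        std::vector<ChunkType> newestChunk;
        Status status = catalog->getChunks(opCtx,
                                           query.obj(),
                                           BSONObj(),
                                           1,
                                           &newestChunk,
                                           nullptr,
                                           repl::ReadConcernLevel::kMajorityReadConcern);
        return status.isOK() && !newestChunk.empty();
    }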
@@ -324,7 +324,7 @@ public:
/**
* Writes a diagnostic event to the action log.
*/
- virtual Status logAction(OperationContext* txn,
+ virtual Status logAction(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail) = 0;
@@ -332,7 +332,7 @@ public:
/**
* Writes a diagnostic event to the change log.
*/
- virtual Status logChange(OperationContext* txn,
+ virtual Status logChange(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail,
@@ -348,13 +348,13 @@ public:
     * Returns ErrorCodes::NoMatchingDocument if no such key exists, or the BSON content of the
     * setting otherwise.
*/
- virtual StatusWith<BSONObj> getGlobalSettings(OperationContext* txn, StringData key) = 0;
+ virtual StatusWith<BSONObj> getGlobalSettings(OperationContext* opCtx, StringData key) = 0;
/**
* Returns the contents of the config.version document - containing the current cluster schema
* version as well as the clusterID.
*/
- virtual StatusWith<VersionType> getConfigVersion(OperationContext* txn,
+ virtual StatusWith<VersionType> getConfigVersion(OperationContext* opCtx,
repl::ReadConcernLevel readConcern) = 0;
/**
@@ -367,7 +367,7 @@ public:
* @param request Request to be sent to the config server.
* @param response Out parameter to receive the response. Can be nullptr.
*/
- virtual void writeConfigServerDirect(OperationContext* txn,
+ virtual void writeConfigServerDirect(OperationContext* opCtx,
const BatchedCommandRequest& request,
BatchedCommandResponse* response) = 0;
@@ -383,7 +383,7 @@ public:
* - DatabaseDifferCase - database already exists, but with a different case
* - ShardNotFound - could not find a shard to place the DB on
*/
- virtual Status createDatabase(OperationContext* txn, const std::string& dbName) = 0;
+ virtual Status createDatabase(OperationContext* opCtx, const std::string& dbName) = 0;
/**
* Directly inserts a document in the specified namespace on the config server. The document
@@ -391,7 +391,7 @@ public:
*
* NOTE: Should not be used in new code outside the ShardingCatalogManager.
*/
- virtual Status insertConfigDocument(OperationContext* txn,
+ virtual Status insertConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) = 0;
@@ -410,7 +410,7 @@ public:
*
* NOTE: Should not be used in new code outside the ShardingCatalogManager.
*/
- virtual StatusWith<bool> updateConfigDocument(OperationContext* txn,
+ virtual StatusWith<bool> updateConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const BSONObj& update,
@@ -423,7 +423,7 @@ public:
*
* NOTE: Should not be used in new code outside the ShardingCatalogManager.
*/
- virtual Status removeConfigDocuments(OperationContext* txn,
+ virtual Status removeConfigDocuments(OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const WriteConcernOptions& writeConcern) = 0;
@@ -433,7 +433,7 @@ public:
* format for listDatabases, based on the listDatabases command parameters in
* 'listDatabasesCmd'.
*/
- virtual Status appendInfoForConfigServerDatabases(OperationContext* txn,
+ virtual Status appendInfoForConfigServerDatabases(OperationContext* opCtx,
const BSONObj& listDatabasesCmd,
BSONArrayBuilder* builder) = 0;
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index 676372be4fd..f541dde581f 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -121,7 +121,7 @@ void toBatchError(const Status& status, BatchedCommandResponse* response) {
* Creates and writes to the config server the first chunks for a newly sharded collection. Returns
* the version generated for the collection.
*/
-StatusWith<ChunkVersion> createFirstChunks(OperationContext* txn,
+StatusWith<ChunkVersion> createFirstChunks(OperationContext* opCtx,
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
const ShardId& primaryShardId,
@@ -136,10 +136,10 @@ StatusWith<ChunkVersion> createFirstChunks(OperationContext* txn,
if (initPoints.empty()) {
// If no split points were specified use the shard's data distribution to determine them
auto primaryShard =
- uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, primaryShardId));
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, primaryShardId));
auto result = uassertStatusOK(primaryShard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
nss.db().toString(),
BSON("count" << nss.coll()),
@@ -151,12 +151,12 @@ StatusWith<ChunkVersion> createFirstChunks(OperationContext* txn,
if (numObjects > 0) {
splitPoints = uassertStatusOK(shardutil::selectChunkSplitPoints(
- txn,
+ opCtx,
primaryShardId,
nss,
shardKeyPattern,
ChunkRange(keyPattern.globalMin(), keyPattern.globalMax()),
- Grid::get(txn)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
+ Grid::get(opCtx)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
0));
}
@@ -207,8 +207,8 @@ StatusWith<ChunkVersion> createFirstChunks(OperationContext* txn,
chunk.setShard(shardIds[i % shardIds.size()]);
chunk.setVersion(version);
- Status status = Grid::get(txn)->catalogClient(txn)->insertConfigDocument(
- txn,
+ Status status = Grid::get(opCtx)->catalogClient(opCtx)->insertConfigDocument(
+ opCtx,
ChunkType::ConfigNS,
chunk.toConfigBSON(),
ShardingCatalogClient::kMajorityWriteConcern);
@@ -240,7 +240,7 @@ Status ShardingCatalogClientImpl::startup() {
return Status::OK();
}
-void ShardingCatalogClientImpl::shutDown(OperationContext* txn) {
+void ShardingCatalogClientImpl::shutDown(OperationContext* opCtx) {
LOG(1) << "ShardingCatalogClientImpl::shutDown() called.";
{
stdx::lock_guard<stdx::mutex> lk(_mutex);
@@ -248,15 +248,15 @@ void ShardingCatalogClientImpl::shutDown(OperationContext* txn) {
}
invariant(_distLockManager);
- _distLockManager->shutDown(txn);
+ _distLockManager->shutDown(opCtx);
}
-Status ShardingCatalogClientImpl::updateCollection(OperationContext* txn,
+Status ShardingCatalogClientImpl::updateCollection(OperationContext* opCtx,
const std::string& collNs,
const CollectionType& coll) {
fassert(28634, coll.validate());
- auto status = updateConfigDocument(txn,
+ auto status = updateConfigDocument(opCtx,
CollectionType::ConfigNS,
BSON(CollectionType::fullNs(collNs)),
coll.toBSON(),
@@ -271,12 +271,12 @@ Status ShardingCatalogClientImpl::updateCollection(OperationContext* txn,
return Status::OK();
}
-Status ShardingCatalogClientImpl::updateDatabase(OperationContext* txn,
+Status ShardingCatalogClientImpl::updateDatabase(OperationContext* opCtx,
const std::string& dbName,
const DatabaseType& db) {
fassert(28616, db.validate());
- auto status = updateConfigDocument(txn,
+ auto status = updateConfigDocument(opCtx,
DatabaseType::ConfigNS,
BSON(DatabaseType::name(dbName)),
db.toBSON(),
@@ -291,7 +291,8 @@ Status ShardingCatalogClientImpl::updateDatabase(OperationContext* txn,
return Status::OK();
}
-Status ShardingCatalogClientImpl::createDatabase(OperationContext* txn, const std::string& dbName) {
+Status ShardingCatalogClientImpl::createDatabase(OperationContext* opCtx,
+ const std::string& dbName) {
invariant(nsIsDbOnly(dbName));
// The admin and config databases should never be explicitly created. They "just exist",
@@ -301,19 +302,19 @@ Status ShardingCatalogClientImpl::createDatabase(OperationContext* txn, const st
// Lock the database globally to prevent conflicts with simultaneous database creation.
auto scopedDistLock = getDistLockManager()->lock(
- txn, dbName, "createDatabase", DistLockManager::kDefaultLockTimeout);
+ opCtx, dbName, "createDatabase", DistLockManager::kDefaultLockTimeout);
if (!scopedDistLock.isOK()) {
return scopedDistLock.getStatus();
}
// check for case sensitivity violations
- Status status = _checkDbDoesNotExist(txn, dbName, nullptr);
+ Status status = _checkDbDoesNotExist(opCtx, dbName, nullptr);
if (!status.isOK()) {
return status;
}
// Database does not exist, pick a shard and create a new entry
- auto newShardIdStatus = _selectShardForNewDatabase(txn, grid.shardRegistry());
+ auto newShardIdStatus = _selectShardForNewDatabase(opCtx, grid.shardRegistry());
if (!newShardIdStatus.isOK()) {
return newShardIdStatus.getStatus();
}
@@ -328,7 +329,7 @@ Status ShardingCatalogClientImpl::createDatabase(OperationContext* txn, const st
db.setSharded(false);
status = insertConfigDocument(
- txn, DatabaseType::ConfigNS, db.toBSON(), ShardingCatalogClient::kMajorityWriteConcern);
+ opCtx, DatabaseType::ConfigNS, db.toBSON(), ShardingCatalogClient::kMajorityWriteConcern);
if (status.code() == ErrorCodes::DuplicateKey) {
return Status(ErrorCodes::NamespaceExists, "database " + dbName + " already exists");
}
@@ -336,12 +337,12 @@ Status ShardingCatalogClientImpl::createDatabase(OperationContext* txn, const st
return status;
}
-Status ShardingCatalogClientImpl::logAction(OperationContext* txn,
+Status ShardingCatalogClientImpl::logAction(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail) {
if (_actionLogCollectionCreated.load() == 0) {
- Status result = _createCappedConfigCollection(txn,
+ Status result = _createCappedConfigCollection(opCtx,
kActionLogCollectionName,
kActionLogCollectionSizeMB,
ShardingCatalogClient::kMajorityWriteConcern);
@@ -353,7 +354,7 @@ Status ShardingCatalogClientImpl::logAction(OperationContext* txn,
}
}
- return _log(txn,
+ return _log(opCtx,
kActionLogCollectionName,
what,
ns,
@@ -361,7 +362,7 @@ Status ShardingCatalogClientImpl::logAction(OperationContext* txn,
ShardingCatalogClient::kMajorityWriteConcern);
}
-Status ShardingCatalogClientImpl::logChange(OperationContext* txn,
+Status ShardingCatalogClientImpl::logChange(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail,
@@ -370,7 +371,7 @@ Status ShardingCatalogClientImpl::logChange(OperationContext* txn,
writeConcern.wMode == WriteConcernOptions::kMajority);
if (_changeLogCollectionCreated.load() == 0) {
Status result = _createCappedConfigCollection(
- txn, kChangeLogCollectionName, kChangeLogCollectionSizeMB, writeConcern);
+ opCtx, kChangeLogCollectionName, kChangeLogCollectionSizeMB, writeConcern);
if (result.isOK()) {
_changeLogCollectionCreated.store(1);
} else {
@@ -379,17 +380,17 @@ Status ShardingCatalogClientImpl::logChange(OperationContext* txn,
}
}
- return _log(txn, kChangeLogCollectionName, what, ns, detail, writeConcern);
+ return _log(opCtx, kChangeLogCollectionName, what, ns, detail, writeConcern);
}
// static
StatusWith<ShardId> ShardingCatalogClientImpl::_selectShardForNewDatabase(
- OperationContext* txn, ShardRegistry* shardRegistry) {
+ OperationContext* opCtx, ShardRegistry* shardRegistry) {
vector<ShardId> allShardIds;
shardRegistry->getAllShardIds(&allShardIds);
if (allShardIds.empty()) {
- shardRegistry->reload(txn);
+ shardRegistry->reload(opCtx);
shardRegistry->getAllShardIds(&allShardIds);
if (allShardIds.empty()) {
@@ -399,7 +400,7 @@ StatusWith<ShardId> ShardingCatalogClientImpl::_selectShardForNewDatabase(
ShardId candidateShardId = allShardIds[0];
- auto candidateSizeStatus = shardutil::retrieveTotalShardSize(txn, candidateShardId);
+ auto candidateSizeStatus = shardutil::retrieveTotalShardSize(opCtx, candidateShardId);
if (!candidateSizeStatus.isOK()) {
return candidateSizeStatus.getStatus();
}
@@ -407,7 +408,7 @@ StatusWith<ShardId> ShardingCatalogClientImpl::_selectShardForNewDatabase(
for (size_t i = 1; i < allShardIds.size(); i++) {
const ShardId shardId = allShardIds[i];
- const auto sizeStatus = shardutil::retrieveTotalShardSize(txn, shardId);
+ const auto sizeStatus = shardutil::retrieveTotalShardSize(opCtx, shardId);
if (!sizeStatus.isOK()) {
return sizeStatus.getStatus();
}
@@ -421,7 +422,8 @@ StatusWith<ShardId> ShardingCatalogClientImpl::_selectShardForNewDatabase(
return candidateShardId;
}
-Status ShardingCatalogClientImpl::enableSharding(OperationContext* txn, const std::string& dbName) {
+Status ShardingCatalogClientImpl::enableSharding(OperationContext* opCtx,
+ const std::string& dbName) {
invariant(nsIsDbOnly(dbName));
if (dbName == NamespaceString::kConfigDb || dbName == NamespaceString::kAdminDb) {
@@ -433,7 +435,7 @@ Status ShardingCatalogClientImpl::enableSharding(OperationContext* txn, const st
// Lock the database globally to prevent conflicts with simultaneous database
// creation/modification.
auto scopedDistLock = getDistLockManager()->lock(
- txn, dbName, "enableSharding", DistLockManager::kDefaultLockTimeout);
+ opCtx, dbName, "enableSharding", DistLockManager::kDefaultLockTimeout);
if (!scopedDistLock.isOK()) {
return scopedDistLock.getStatus();
}
@@ -441,10 +443,10 @@ Status ShardingCatalogClientImpl::enableSharding(OperationContext* txn, const st
// Check for case sensitivity violations
DatabaseType db;
- Status status = _checkDbDoesNotExist(txn, dbName, &db);
+ Status status = _checkDbDoesNotExist(opCtx, dbName, &db);
if (status.isOK()) {
// Database does not exist, create a new entry
- auto newShardIdStatus = _selectShardForNewDatabase(txn, grid.shardRegistry());
+ auto newShardIdStatus = _selectShardForNewDatabase(opCtx, grid.shardRegistry());
if (!newShardIdStatus.isOK()) {
return newShardIdStatus.getStatus();
}
@@ -470,23 +472,23 @@ Status ShardingCatalogClientImpl::enableSharding(OperationContext* txn, const st
log() << "Enabling sharding for database [" << dbName << "] in config db";
- return updateDatabase(txn, dbName, db);
+ return updateDatabase(opCtx, dbName, db);
}
-Status ShardingCatalogClientImpl::_log(OperationContext* txn,
+Status ShardingCatalogClientImpl::_log(OperationContext* opCtx,
const StringData& logCollName,
const std::string& what,
const std::string& operationNS,
const BSONObj& detail,
const WriteConcernOptions& writeConcern) {
- Date_t now = Grid::get(txn)->getNetwork()->now();
- const std::string hostName = Grid::get(txn)->getNetwork()->getHostName();
+ Date_t now = Grid::get(opCtx)->getNetwork()->now();
+ const std::string hostName = Grid::get(opCtx)->getNetwork()->getHostName();
const string changeId = str::stream() << hostName << "-" << now.toString() << "-" << OID::gen();
ChangeLogType changeLog;
changeLog.setChangeId(changeId);
changeLog.setServer(hostName);
- changeLog.setClientAddr(txn->getClient()->clientAddress(true));
+ changeLog.setClientAddr(opCtx->getClient()->clientAddress(true));
changeLog.setTime(now);
changeLog.setNS(operationNS);
changeLog.setWhat(what);
@@ -496,7 +498,7 @@ Status ShardingCatalogClientImpl::_log(OperationContext* txn,
log() << "about to log metadata event into " << logCollName << ": " << redact(changeLogBSON);
const NamespaceString nss("config", logCollName);
- Status result = insertConfigDocument(txn, nss.ns(), changeLogBSON, writeConcern);
+ Status result = insertConfigDocument(opCtx, nss.ns(), changeLogBSON, writeConcern);
if (!result.isOK()) {
warning() << "Error encountered while logging config change with ID [" << changeId
@@ -506,7 +508,7 @@ Status ShardingCatalogClientImpl::_log(OperationContext* txn,
return result;
}
-Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
+Status ShardingCatalogClientImpl::shardCollection(OperationContext* opCtx,
const string& ns,
const ShardKeyPattern& fieldsAndOrder,
const BSONObj& defaultCollation,
@@ -516,20 +518,20 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
// Lock the collection globally so that no other mongos can try to shard or drop the collection
// at the same time.
auto scopedDistLock = getDistLockManager()->lock(
- txn, ns, "shardCollection", DistLockManager::kDefaultLockTimeout);
+ opCtx, ns, "shardCollection", DistLockManager::kDefaultLockTimeout);
if (!scopedDistLock.isOK()) {
return scopedDistLock.getStatus();
}
- auto getDBStatus = getDatabase(txn, nsToDatabase(ns));
+ auto getDBStatus = getDatabase(opCtx, nsToDatabase(ns));
if (!getDBStatus.isOK()) {
return getDBStatus.getStatus();
}
- auto const shardRegistry = Grid::get(txn)->shardRegistry();
+ auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
ShardId dbPrimaryShardId = getDBStatus.getValue().value.getPrimary();
- const auto primaryShardStatus = shardRegistry->getShard(txn, dbPrimaryShardId);
+ const auto primaryShardStatus = shardRegistry->getShard(opCtx, dbPrimaryShardId);
if (!primaryShardStatus.isOK()) {
return primaryShardStatus.getStatus();
}
@@ -538,7 +540,7 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
// This is an extra safety check that there aren't any partially written chunks from a
// previous failed invocation of 'shardCollection'
auto countStatus = _runCountCommandOnConfig(
- txn, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::ns(ns)));
+ opCtx, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::ns(ns)));
if (!countStatus.isOK()) {
return countStatus.getStatus();
}
@@ -567,7 +569,7 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
collectionDetail.append("numChunks", static_cast<int>(initPoints.size() + 1));
- logChange(txn,
+ logChange(opCtx,
"shardCollection.start",
ns,
collectionDetail.obj(),
@@ -579,8 +581,8 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
// Construct the collection default collator.
std::unique_ptr<CollatorInterface> defaultCollator;
if (!defaultCollation.isEmpty()) {
- auto statusWithCollator =
- CollatorFactoryInterface::get(txn->getServiceContext())->makeFromBSON(defaultCollation);
+ auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
+ ->makeFromBSON(defaultCollation);
if (!statusWithCollator.isOK()) {
return statusWithCollator.getStatus();
}
@@ -588,7 +590,7 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
}
auto createFirstChunksStatus =
- createFirstChunks(txn, nss, fieldsAndOrder, dbPrimaryShardId, initPoints, initShardIds);
+ createFirstChunks(opCtx, nss, fieldsAndOrder, dbPrimaryShardId, initPoints, initShardIds);
if (!createFirstChunksStatus.isOK()) {
return createFirstChunksStatus.getStatus();
}
@@ -607,7 +609,7 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
coll.setDefaultCollation(defaultCollator ? defaultCollator->getSpec().toBSON() : BSONObj());
coll.setUnique(unique);
- Status updateCollStatus = updateCollection(txn, ns, coll);
+ Status updateCollStatus = updateCollection(opCtx, ns, coll);
if (!updateCollStatus.isOK()) {
return updateCollStatus;
}
@@ -624,14 +626,14 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
collVersion,
true);
- auto shardStatus = shardRegistry->getShard(txn, dbPrimaryShardId);
+ auto shardStatus = shardRegistry->getShard(opCtx, dbPrimaryShardId);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
auto shard = shardStatus.getValue();
auto ssvResponse =
- shard->runCommandWithFixedRetryAttempts(txn,
+ shard->runCommandWithFixedRetryAttempts(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
ssv.toBSON(),
@@ -643,7 +645,7 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
<< dbPrimaryShardId << causedBy(redact(status));
}
- logChange(txn,
+ logChange(opCtx,
"shardCollection.end",
ns,
BSON("version" << collVersion.toString()),
@@ -652,12 +654,12 @@ Status ShardingCatalogClientImpl::shardCollection(OperationContext* txn,
return Status::OK();
}
-StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(OperationContext* txn,
+StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(OperationContext* opCtx,
const ShardId& shardId) {
// Check preconditions for removing the shard
string name = shardId.toString();
auto countStatus = _runCountCommandOnConfig(
- txn,
+ opCtx,
NamespaceString(ShardType::ConfigNS),
BSON(ShardType::name() << NE << name << ShardType::draining(true)));
if (!countStatus.isOK()) {
@@ -669,7 +671,7 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
}
countStatus = _runCountCommandOnConfig(
- txn, NamespaceString(ShardType::ConfigNS), BSON(ShardType::name() << NE << name));
+ opCtx, NamespaceString(ShardType::ConfigNS), BSON(ShardType::name() << NE << name));
if (!countStatus.isOK()) {
return countStatus.getStatus();
}
@@ -679,7 +681,7 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
// Figure out if shard is already draining
countStatus =
- _runCountCommandOnConfig(txn,
+ _runCountCommandOnConfig(opCtx,
NamespaceString(ShardType::ConfigNS),
BSON(ShardType::name() << name << ShardType::draining(true)));
if (!countStatus.isOK()) {
@@ -689,7 +691,7 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
if (countStatus.getValue() == 0) {
log() << "going to start draining shard: " << name;
- auto updateStatus = updateConfigDocument(txn,
+ auto updateStatus = updateConfigDocument(opCtx,
ShardType::ConfigNS,
BSON(ShardType::name() << name),
BSON("$set" << BSON(ShardType::draining(true))),
@@ -701,10 +703,10 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
return updateStatus.getStatus();
}
- grid.shardRegistry()->reload(txn);
+ grid.shardRegistry()->reload(opCtx);
// Record start in changelog
- logChange(txn,
+ logChange(opCtx,
"removeShard.start",
"",
BSON("shard" << name),
@@ -715,14 +717,14 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
// Draining has already started, now figure out how many chunks and databases are still on the
// shard.
countStatus = _runCountCommandOnConfig(
- txn, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::shard(name)));
+ opCtx, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::shard(name)));
if (!countStatus.isOK()) {
return countStatus.getStatus();
}
const long long chunkCount = countStatus.getValue();
countStatus = _runCountCommandOnConfig(
- txn, NamespaceString(DatabaseType::ConfigNS), BSON(DatabaseType::primary(name)));
+ opCtx, NamespaceString(DatabaseType::ConfigNS), BSON(DatabaseType::primary(name)));
if (!countStatus.isOK()) {
return countStatus.getStatus();
}
@@ -735,9 +737,9 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
// Draining is done, now finish removing the shard.
log() << "going to remove shard: " << name;
- audit::logRemoveShard(txn->getClient(), name);
+ audit::logRemoveShard(opCtx->getClient(), name);
- Status status = removeConfigDocuments(txn,
+ Status status = removeConfigDocuments(opCtx,
ShardType::ConfigNS,
BSON(ShardType::name() << name),
ShardingCatalogClient::kMajorityWriteConcern);
@@ -750,10 +752,10 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
shardConnectionPool.removeHost(name);
ReplicaSetMonitor::remove(name);
- grid.shardRegistry()->reload(txn);
+ grid.shardRegistry()->reload(opCtx);
// Record finish in changelog
- logChange(txn,
+ logChange(opCtx,
"removeShard",
"",
BSON("shard" << name),
@@ -763,7 +765,7 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
}
StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::getDatabase(
- OperationContext* txn, const std::string& dbName) {
+ OperationContext* opCtx, const std::string& dbName) {
if (!NamespaceString::validDBName(dbName, NamespaceString::DollarInDbNameBehavior::Allow)) {
return {ErrorCodes::InvalidNamespace, stream() << dbName << " is not a valid db name"};
}
@@ -778,12 +780,12 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::getDatabas
return repl::OpTimeWith<DatabaseType>(dbt);
}
- auto result = _fetchDatabaseMetadata(txn, dbName, kConfigReadSelector);
+ auto result = _fetchDatabaseMetadata(opCtx, dbName, kConfigReadSelector);
if (result == ErrorCodes::NamespaceNotFound) {
// If we failed to find the database metadata on the 'nearest' config server, try again
// against the primary, in case the database was recently created.
- result =
- _fetchDatabaseMetadata(txn, dbName, ReadPreferenceSetting{ReadPreference::PrimaryOnly});
+ result = _fetchDatabaseMetadata(
+ opCtx, dbName, ReadPreferenceSetting{ReadPreference::PrimaryOnly});
if (!result.isOK() && (result != ErrorCodes::NamespaceNotFound)) {
return {result.getStatus().code(),
str::stream() << "Could not confirm non-existence of database " << dbName
@@ -796,10 +798,10 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::getDatabas
}
StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::_fetchDatabaseMetadata(
- OperationContext* txn, const std::string& dbName, const ReadPreferenceSetting& readPref) {
+ OperationContext* opCtx, const std::string& dbName, const ReadPreferenceSetting& readPref) {
dassert(dbName != "admin" && dbName != "config");
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
readPref,
repl::ReadConcernLevel::kMajorityReadConcern,
NamespaceString(DatabaseType::ConfigNS),
@@ -826,8 +828,8 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::_fetchData
}
StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientImpl::getCollection(
- OperationContext* txn, const std::string& collNs) {
- auto statusFind = _exhaustiveFindOnConfig(txn,
+ OperationContext* opCtx, const std::string& collNs) {
+ auto statusFind = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
NamespaceString(CollectionType::ConfigNS),
@@ -855,7 +857,7 @@ StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientImpl::getColle
return repl::OpTimeWith<CollectionType>(parseStatus.getValue(), retOpTimePair.opTime);
}
-Status ShardingCatalogClientImpl::getCollections(OperationContext* txn,
+Status ShardingCatalogClientImpl::getCollections(OperationContext* opCtx,
const std::string* dbName,
std::vector<CollectionType>* collections,
OpTime* opTime) {
@@ -866,7 +868,7 @@ Status ShardingCatalogClientImpl::getCollections(OperationContext* txn,
string(str::stream() << "^" << pcrecpp::RE::QuoteMeta(*dbName) << "\\."));
}
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
NamespaceString(CollectionType::ConfigNS),
@@ -901,14 +903,15 @@ Status ShardingCatalogClientImpl::getCollections(OperationContext* txn,
return Status::OK();
}
-Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const NamespaceString& ns) {
- logChange(txn,
+Status ShardingCatalogClientImpl::dropCollection(OperationContext* opCtx,
+ const NamespaceString& ns) {
+ logChange(opCtx,
"dropCollection.start",
ns.ns(),
BSONObj(),
ShardingCatalogClientImpl::kMajorityWriteConcern);
- auto shardsStatus = getAllShards(txn, repl::ReadConcernLevel::kMajorityReadConcern);
+ auto shardsStatus = getAllShards(opCtx, repl::ReadConcernLevel::kMajorityReadConcern);
if (!shardsStatus.isOK()) {
return shardsStatus.getStatus();
}
@@ -923,7 +926,7 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
waitFor = Seconds(data["waitForSecs"].numberInt());
}
- auto scopedDistLock = getDistLockManager()->lock(txn, ns.ns(), "drop", waitFor);
+ auto scopedDistLock = getDistLockManager()->lock(opCtx, ns.ns(), "drop", waitFor);
if (!scopedDistLock.isOK()) {
return scopedDistLock.getStatus();
}
@@ -934,16 +937,16 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
auto* shardRegistry = grid.shardRegistry();
for (const auto& shardEntry : allShards) {
- auto shardStatus = shardRegistry->getShard(txn, shardEntry.getName());
+ auto shardStatus = shardRegistry->getShard(opCtx, shardEntry.getName());
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
auto dropResult = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
ns.db().toString(),
BSON("drop" << ns.coll() << WriteConcernOptions::kWriteConcernField
- << txn->getWriteConcern().toBSON()),
+ << opCtx->getWriteConcern().toBSON()),
Shard::RetryPolicy::kIdempotent);
if (!dropResult.isOK()) {
@@ -987,7 +990,7 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
LOG(1) << "dropCollection " << ns << " shard data deleted";
// Remove chunk data
- Status result = removeConfigDocuments(txn,
+ Status result = removeConfigDocuments(opCtx,
ChunkType::ConfigNS,
BSON(ChunkType::ns(ns.ns())),
ShardingCatalogClient::kMajorityWriteConcern);
@@ -1002,9 +1005,9 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
coll.setNs(ns);
coll.setDropped(true);
coll.setEpoch(ChunkVersion::DROPPED().epoch());
- coll.setUpdatedAt(Grid::get(txn)->getNetwork()->now());
+ coll.setUpdatedAt(Grid::get(opCtx)->getNetwork()->now());
- result = updateCollection(txn, ns.ns(), coll);
+ result = updateCollection(opCtx, ns.ns(), coll);
if (!result.isOK()) {
return result;
}
@@ -1020,14 +1023,14 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
ChunkVersion::DROPPED(),
true);
- auto shardStatus = shardRegistry->getShard(txn, shardEntry.getName());
+ auto shardStatus = shardRegistry->getShard(opCtx, shardEntry.getName());
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
auto shard = shardStatus.getValue();
auto ssvResult = shard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
ssv.toBSON(),
@@ -1043,7 +1046,7 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
}
auto unsetShardingStatus = shard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
BSON("unsetSharding" << 1),
@@ -1061,7 +1064,7 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
LOG(1) << "dropCollection " << ns << " completed";
- logChange(txn,
+ logChange(opCtx,
"dropCollection",
ns.ns(),
BSONObj(),
@@ -1070,9 +1073,9 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* txn, const Na
return Status::OK();
}
-StatusWith<BSONObj> ShardingCatalogClientImpl::getGlobalSettings(OperationContext* txn,
+StatusWith<BSONObj> ShardingCatalogClientImpl::getGlobalSettings(OperationContext* opCtx,
StringData key) {
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
kSettingsNamespace,
@@ -1094,9 +1097,9 @@ StatusWith<BSONObj> ShardingCatalogClientImpl::getGlobalSettings(OperationContex
}
StatusWith<VersionType> ShardingCatalogClientImpl::getConfigVersion(
- OperationContext* txn, repl::ReadConcernLevel readConcern) {
- auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) {
+ auto findStatus = Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx,
kConfigReadSelector,
readConcern,
NamespaceString(VersionType::ConfigNS),
@@ -1142,10 +1145,10 @@ StatusWith<VersionType> ShardingCatalogClientImpl::getConfigVersion(
return versionTypeResult.getValue();
}
-Status ShardingCatalogClientImpl::getDatabasesForShard(OperationContext* txn,
+Status ShardingCatalogClientImpl::getDatabasesForShard(OperationContext* opCtx,
const ShardId& shardId,
vector<string>* dbs) {
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
NamespaceString(DatabaseType::ConfigNS),
@@ -1170,7 +1173,7 @@ Status ShardingCatalogClientImpl::getDatabasesForShard(OperationContext* txn,
return Status::OK();
}
-Status ShardingCatalogClientImpl::getChunks(OperationContext* txn,
+Status ShardingCatalogClientImpl::getChunks(OperationContext* opCtx,
const BSONObj& query,
const BSONObj& sort,
boost::optional<int> limit,
@@ -1183,7 +1186,7 @@ Status ShardingCatalogClientImpl::getChunks(OperationContext* txn,
// Convert boost::optional<int> to boost::optional<long long>.
auto longLimit = limit ? boost::optional<long long>(*limit) : boost::none;
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
readConcern,
NamespaceString(ChunkType::ConfigNS),
@@ -1217,12 +1220,12 @@ Status ShardingCatalogClientImpl::getChunks(OperationContext* txn,
return Status::OK();
}
-Status ShardingCatalogClientImpl::getTagsForCollection(OperationContext* txn,
+Status ShardingCatalogClientImpl::getTagsForCollection(OperationContext* opCtx,
const std::string& collectionNs,
std::vector<TagsType>* tags) {
tags->clear();
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
NamespaceString(TagsType::ConfigNS),
@@ -1252,9 +1255,9 @@ Status ShardingCatalogClientImpl::getTagsForCollection(OperationContext* txn,
}
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientImpl::getAllShards(
- OperationContext* txn, repl::ReadConcernLevel readConcern) {
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) {
std::vector<ShardType> shards;
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
readConcern,
NamespaceString(ShardType::ConfigNS),
@@ -1287,7 +1290,7 @@ StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientImpl::
findStatus.getValue().opTime};
}
-bool ShardingCatalogClientImpl::runUserManagementWriteCommand(OperationContext* txn,
+bool ShardingCatalogClientImpl::runUserManagementWriteCommand(OperationContext* opCtx,
const std::string& commandName,
const std::string& dbname,
const BSONObj& cmdObj,
@@ -1339,8 +1342,8 @@ bool ShardingCatalogClientImpl::runUserManagementWriteCommand(OperationContext*
}
auto response =
- Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
dbname,
cmdToRun,
@@ -1361,7 +1364,7 @@ bool ShardingCatalogClientImpl::runUserManagementWriteCommand(OperationContext*
return true;
}
-bool ShardingCatalogClientImpl::runReadCommandForTest(OperationContext* txn,
+bool ShardingCatalogClientImpl::runReadCommandForTest(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) {
@@ -1370,8 +1373,8 @@ bool ShardingCatalogClientImpl::runReadCommandForTest(OperationContext* txn,
_appendReadConcern(&cmdBuilder);
auto resultStatus =
- Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn, kConfigReadSelector, dbname, cmdBuilder.done(), Shard::RetryPolicy::kIdempotent);
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ opCtx, kConfigReadSelector, dbname, cmdBuilder.done(), Shard::RetryPolicy::kIdempotent);
if (resultStatus.isOK()) {
result->appendElements(resultStatus.getValue().response);
return resultStatus.getValue().commandStatus.isOK();
@@ -1380,13 +1383,13 @@ bool ShardingCatalogClientImpl::runReadCommandForTest(OperationContext* txn,
return Command::appendCommandStatus(*result, resultStatus.getStatus());
}
-bool ShardingCatalogClientImpl::runUserManagementReadCommand(OperationContext* txn,
+bool ShardingCatalogClientImpl::runUserManagementReadCommand(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) {
auto resultStatus =
- Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ opCtx,
kConfigPrimaryPreferredSelector,
dbname,
cmdObj,
@@ -1400,7 +1403,7 @@ bool ShardingCatalogClientImpl::runUserManagementReadCommand(OperationContext* t
return Command::appendCommandStatus(*result, resultStatus.getStatus());
}
-Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* txn,
+Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
const std::string& nss,
@@ -1415,8 +1418,8 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* txn,
<< writeConcern.toBSON());
auto response =
- Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"config",
cmd,
@@ -1456,7 +1459,7 @@ Status ShardingCatalogClientImpl::applyChunkOpsDeprecated(OperationContext* txn,
lastChunkVersion.addToBSON(query, ChunkType::DEPRECATED_lastmod());
query.append(ChunkType::ns(), nss);
Status chunkStatus =
- getChunks(txn, query.obj(), BSONObj(), 1, &newestChunk, nullptr, readConcern);
+ getChunks(opCtx, query.obj(), BSONObj(), 1, &newestChunk, nullptr, readConcern);
if (!chunkStatus.isOK()) {
errMsg = str::stream() << "getChunks function failed, unable to validate chunk "
@@ -1486,7 +1489,7 @@ DistLockManager* ShardingCatalogClientImpl::getDistLockManager() {
return _distLockManager.get();
}
-void ShardingCatalogClientImpl::writeConfigServerDirect(OperationContext* txn,
+void ShardingCatalogClientImpl::writeConfigServerDirect(OperationContext* opCtx,
const BatchedCommandRequest& batchRequest,
BatchedCommandResponse* batchResponse) {
// We only support batch sizes of one for config writes
@@ -1499,12 +1502,12 @@ void ShardingCatalogClientImpl::writeConfigServerDirect(OperationContext* txn,
return;
}
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
*batchResponse = configShard->runBatchWriteCommandOnConfig(
- txn, batchRequest, Shard::RetryPolicy::kNotIdempotent);
+ opCtx, batchRequest, Shard::RetryPolicy::kNotIdempotent);
}
-Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* txn,
+Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) {
@@ -1521,10 +1524,10 @@ Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* txn,
request.setNS(nss);
request.setWriteConcern(writeConcern.toBSON());
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
for (int retry = 1; retry <= kMaxWriteRetry; retry++) {
auto response =
- configShard->runBatchWriteCommandOnConfig(txn, request, Shard::RetryPolicy::kNoRetry);
+ configShard->runBatchWriteCommandOnConfig(opCtx, request, Shard::RetryPolicy::kNoRetry);
Status status = response.toStatus();
@@ -1544,7 +1547,7 @@ Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* txn,
LOG(1) << "Insert retry failed because of duplicate key error, rechecking.";
auto fetchDuplicate =
- _exhaustiveFindOnConfig(txn,
+ _exhaustiveFindOnConfig(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
nss,
@@ -1580,7 +1583,7 @@ Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* txn,
}
StatusWith<bool> ShardingCatalogClientImpl::updateConfigDocument(
- OperationContext* txn,
+ OperationContext* opCtx,
const string& ns,
const BSONObj& query,
const BSONObj& update,
@@ -1605,9 +1608,9 @@ StatusWith<bool> ShardingCatalogClientImpl::updateConfigDocument(
request.setNS(nss);
request.setWriteConcern(writeConcern.toBSON());
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto response =
- configShard->runBatchWriteCommandOnConfig(txn, request, Shard::RetryPolicy::kIdempotent);
+ configShard->runBatchWriteCommandOnConfig(opCtx, request, Shard::RetryPolicy::kIdempotent);
Status status = response.toStatus();
if (!status.isOK()) {
@@ -1619,7 +1622,7 @@ StatusWith<bool> ShardingCatalogClientImpl::updateConfigDocument(
return (nSelected == 1);
}
-Status ShardingCatalogClientImpl::removeConfigDocuments(OperationContext* txn,
+Status ShardingCatalogClientImpl::removeConfigDocuments(OperationContext* opCtx,
const string& ns,
const BSONObj& query,
const WriteConcernOptions& writeConcern) {
@@ -1637,21 +1640,21 @@ Status ShardingCatalogClientImpl::removeConfigDocuments(OperationContext* txn,
request.setNS(nss);
request.setWriteConcern(writeConcern.toBSON());
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto response =
- configShard->runBatchWriteCommandOnConfig(txn, request, Shard::RetryPolicy::kIdempotent);
+ configShard->runBatchWriteCommandOnConfig(opCtx, request, Shard::RetryPolicy::kIdempotent);
return response.toStatus();
}
-Status ShardingCatalogClientImpl::_checkDbDoesNotExist(OperationContext* txn,
+Status ShardingCatalogClientImpl::_checkDbDoesNotExist(OperationContext* opCtx,
const string& dbName,
DatabaseType* db) {
BSONObjBuilder queryBuilder;
queryBuilder.appendRegex(
DatabaseType::name(), (string) "^" + pcrecpp::RE::QuoteMeta(dbName) + "$", "i");
- auto findStatus = _exhaustiveFindOnConfig(txn,
+ auto findStatus = _exhaustiveFindOnConfig(opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
NamespaceString(DatabaseType::ConfigNS),
@@ -1692,7 +1695,7 @@ Status ShardingCatalogClientImpl::_checkDbDoesNotExist(OperationContext* txn,
}
Status ShardingCatalogClientImpl::_createCappedConfigCollection(
- OperationContext* txn,
+ OperationContext* opCtx,
StringData collName,
int cappedSize,
const WriteConcernOptions& writeConcern) {
@@ -1701,8 +1704,8 @@ Status ShardingCatalogClientImpl::_createCappedConfigCollection(
<< writeConcern.toBSON());
auto result =
- Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"config",
createCmd,
@@ -1728,7 +1731,7 @@ Status ShardingCatalogClientImpl::_createCappedConfigCollection(
return result.getValue().writeConcernStatus;
}
-StatusWith<long long> ShardingCatalogClientImpl::_runCountCommandOnConfig(OperationContext* txn,
+StatusWith<long long> ShardingCatalogClientImpl::_runCountCommandOnConfig(OperationContext* opCtx,
const NamespaceString& ns,
BSONObj query) {
BSONObjBuilder countBuilder;
@@ -1736,9 +1739,9 @@ StatusWith<long long> ShardingCatalogClientImpl::_runCountCommandOnConfig(Operat
countBuilder.append("query", query);
_appendReadConcern(&countBuilder);
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto resultStatus =
- configShard->runCommandWithFixedRetryAttempts(txn,
+ configShard->runCommandWithFixedRetryAttempts(opCtx,
kConfigReadSelector,
ns.db().toString(),
countBuilder.done(),
@@ -1763,15 +1766,15 @@ StatusWith<long long> ShardingCatalogClientImpl::_runCountCommandOnConfig(Operat
}
StatusWith<repl::OpTimeWith<vector<BSONObj>>> ShardingCatalogClientImpl::_exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
repl::ReadConcernLevel readConcern,
const NamespaceString& nss,
const BSONObj& query,
const BSONObj& sort,
boost::optional<long long> limit) {
- auto response = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn, readPref, readConcern, nss, query, sort, limit);
+ auto response = Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx, readPref, readConcern, nss, query, sort, limit);
if (!response.isOK()) {
return response.getStatus();
}
@@ -1787,10 +1790,10 @@ void ShardingCatalogClientImpl::_appendReadConcern(BSONObjBuilder* builder) {
}
Status ShardingCatalogClientImpl::appendInfoForConfigServerDatabases(
- OperationContext* txn, const BSONObj& listDatabasesCmd, BSONArrayBuilder* builder) {
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ OperationContext* opCtx, const BSONObj& listDatabasesCmd, BSONArrayBuilder* builder) {
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto resultStatus =
- configShard->runCommandWithFixedRetryAttempts(txn,
+ configShard->runCommandWithFixedRetryAttempts(opCtx,
kConfigPrimaryPreferredSelector,
"admin",
listDatabasesCmd,
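(For illustration only, not part of the diff: a minimal caller-side sketch of the updateConfigDocument helper touched in the hunks above, showing the operation context, now named opCtx, being threaded through to the config shard. The collection name, query, update document, and default write concern are placeholders invented for this sketch.)

    // Hypothetical caller: sets an illustrative flag on a single config document.
    Status setExampleFlag(OperationContext* opCtx,
                          ShardingCatalogClient* catalogClient,
                          const std::string& docId) {
        auto updated = catalogClient->updateConfigDocument(
            opCtx,
            "config.example",                      // illustrative namespace
            BSON("_id" << docId),                  // match a single document
            BSON("$set" << BSON("flag" << true)),  // illustrative update
            false,                                 // do not upsert
            WriteConcernOptions());                // default write concern
        if (!updated.isOK()) {
            return updated.getStatus();
        }
        // updateConfigDocument reports whether exactly one document was selected.
        return updated.getValue()
            ? Status::OK()
            : Status(ErrorCodes::NoMatchingDocument, "no config document matched");
    }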
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index 898b3774456..0a94a3a18eb 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -58,32 +58,32 @@ public:
*/
Status startup() override;
- void shutDown(OperationContext* txn) override;
+ void shutDown(OperationContext* opCtx) override;
- Status enableSharding(OperationContext* txn, const std::string& dbName) override;
+ Status enableSharding(OperationContext* opCtx, const std::string& dbName) override;
- Status updateDatabase(OperationContext* txn,
+ Status updateDatabase(OperationContext* opCtx,
const std::string& dbName,
const DatabaseType& db) override;
- Status updateCollection(OperationContext* txn,
+ Status updateCollection(OperationContext* opCtx,
const std::string& collNs,
const CollectionType& coll) override;
- Status createDatabase(OperationContext* txn, const std::string& dbName) override;
+ Status createDatabase(OperationContext* opCtx, const std::string& dbName) override;
- Status logAction(OperationContext* txn,
+ Status logAction(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail) override;
- Status logChange(OperationContext* txn,
+ Status logChange(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail,
const WriteConcernOptions& writeConcern) override;
- Status shardCollection(OperationContext* txn,
+ Status shardCollection(OperationContext* opCtx,
const std::string& ns,
const ShardKeyPattern& fieldsAndOrder,
const BSONObj& defaultCollation,
@@ -91,27 +91,27 @@ public:
const std::vector<BSONObj>& initPoints,
const std::set<ShardId>& initShardsIds) override;
- StatusWith<ShardDrainingStatus> removeShard(OperationContext* txn,
+ StatusWith<ShardDrainingStatus> removeShard(OperationContext* opCtx,
const ShardId& name) override;
- StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* txn,
+ StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* opCtx,
const std::string& dbName) override;
- StatusWith<repl::OpTimeWith<CollectionType>> getCollection(OperationContext* txn,
+ StatusWith<repl::OpTimeWith<CollectionType>> getCollection(OperationContext* opCtx,
const std::string& collNs) override;
- Status getCollections(OperationContext* txn,
+ Status getCollections(OperationContext* opCtx,
const std::string* dbName,
std::vector<CollectionType>* collections,
repl::OpTime* optime) override;
- Status dropCollection(OperationContext* txn, const NamespaceString& ns) override;
+ Status dropCollection(OperationContext* opCtx, const NamespaceString& ns) override;
- Status getDatabasesForShard(OperationContext* txn,
+ Status getDatabasesForShard(OperationContext* opCtx,
const ShardId& shardName,
std::vector<std::string>* dbs) override;
- Status getChunks(OperationContext* txn,
+ Status getChunks(OperationContext* opCtx,
const BSONObj& query,
const BSONObj& sort,
boost::optional<int> limit,
@@ -119,25 +119,25 @@ public:
repl::OpTime* opTime,
repl::ReadConcernLevel readConcern) override;
- Status getTagsForCollection(OperationContext* txn,
+ Status getTagsForCollection(OperationContext* opCtx,
const std::string& collectionNs,
std::vector<TagsType>* tags) override;
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
- OperationContext* txn, repl::ReadConcernLevel readConcern) override;
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) override;
- bool runUserManagementWriteCommand(OperationContext* txn,
+ bool runUserManagementWriteCommand(OperationContext* opCtx,
const std::string& commandName,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) override;
- bool runUserManagementReadCommand(OperationContext* txn,
+ bool runUserManagementReadCommand(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) override;
- Status applyChunkOpsDeprecated(OperationContext* txn,
+ Status applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
const std::string& nss,
@@ -145,42 +145,42 @@ public:
const WriteConcernOptions& writeConcern,
repl::ReadConcernLevel readConcern) override;
- StatusWith<BSONObj> getGlobalSettings(OperationContext* txn, StringData key) override;
+ StatusWith<BSONObj> getGlobalSettings(OperationContext* opCtx, StringData key) override;
- StatusWith<VersionType> getConfigVersion(OperationContext* txn,
+ StatusWith<VersionType> getConfigVersion(OperationContext* opCtx,
repl::ReadConcernLevel readConcern) override;
- void writeConfigServerDirect(OperationContext* txn,
+ void writeConfigServerDirect(OperationContext* opCtx,
const BatchedCommandRequest& request,
BatchedCommandResponse* response) override;
- Status insertConfigDocument(OperationContext* txn,
+ Status insertConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) override;
- StatusWith<bool> updateConfigDocument(OperationContext* txn,
+ StatusWith<bool> updateConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const BSONObj& update,
bool upsert,
const WriteConcernOptions& writeConcern) override;
- Status removeConfigDocuments(OperationContext* txn,
+ Status removeConfigDocuments(OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const WriteConcernOptions& writeConcern) override;
DistLockManager* getDistLockManager() override;
- Status appendInfoForConfigServerDatabases(OperationContext* txn,
+ Status appendInfoForConfigServerDatabases(OperationContext* opCtx,
const BSONObj& listDatabasesCmd,
BSONArrayBuilder* builder) override;
/**
* Runs a read command against the config server with majority read concern.
*/
- bool runReadCommandForTest(OperationContext* txn,
+ bool runReadCommandForTest(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result);
@@ -190,7 +190,7 @@ private:
* Selects an optimal shard on which to place a newly created database from the set of
* available shards. Will return ShardNotFound if shard could not be found.
*/
- static StatusWith<ShardId> _selectShardForNewDatabase(OperationContext* txn,
+ static StatusWith<ShardId> _selectShardForNewDatabase(OperationContext* opCtx,
ShardRegistry* shardRegistry);
/**
@@ -203,12 +203,14 @@ private:
* NamespaceExists if it exists with the same casing
* DatabaseDifferCase if it exists under different casing.
*/
- Status _checkDbDoesNotExist(OperationContext* txn, const std::string& dbName, DatabaseType* db);
+ Status _checkDbDoesNotExist(OperationContext* opCtx,
+ const std::string& dbName,
+ DatabaseType* db);
/**
* Creates the specified collection name in the config database.
*/
- Status _createCappedConfigCollection(OperationContext* txn,
+ Status _createCappedConfigCollection(OperationContext* opCtx,
StringData collName,
int cappedSize,
const WriteConcernOptions& writeConcern);
@@ -217,12 +219,12 @@ private:
* Helper method for running a count command against the config server with appropriate
* error handling.
*/
- StatusWith<long long> _runCountCommandOnConfig(OperationContext* txn,
+ StatusWith<long long> _runCountCommandOnConfig(OperationContext* opCtx,
const NamespaceString& ns,
BSONObj query);
StatusWith<repl::OpTimeWith<std::vector<BSONObj>>> _exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
repl::ReadConcernLevel readConcern,
const NamespaceString& nss,
@@ -240,21 +242,21 @@ private:
* given read preference. Returns NamespaceNotFound if no database metadata is found.
*/
StatusWith<repl::OpTimeWith<DatabaseType>> _fetchDatabaseMetadata(
- OperationContext* txn, const std::string& dbName, const ReadPreferenceSetting& readPref);
+ OperationContext* opCtx, const std::string& dbName, const ReadPreferenceSetting& readPref);
/**
* Best effort method, which logs diagnostic events on the config server. If the config server
* write fails for any reason a warning will be written to the local service log and the method
* will return a failed status.
*
- * @param txn Operation context in which the call is running
+ * @param opCtx Operation context in which the call is running
* @param logCollName Which config collection to write to (excluding the database name)
* @param what E.g. "split", "migrate" (not interpreted)
* @param operationNS To which collection the metadata change is being applied (not interpreted)
* @param detail Additional info about the metadata change (not interpreted)
* @param writeConcern Write concern options to use for logging
*/
- Status _log(OperationContext* txn,
+ Status _log(OperationContext* opCtx,
const StringData& logCollName,
const std::string& what,
const std::string& operationNS,
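(Also for illustration only: a read-side sketch against the getGlobalSettings declaration above; the settings key is made up and the error handling is kept minimal.)

    // Hypothetical caller: reads a settings document from the config server.
    void logExampleSetting(OperationContext* opCtx) {
        auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
        auto settingWith = catalogClient->getGlobalSettings(opCtx, "exampleKey");  // key is illustrative
        if (!settingWith.isOK()) {
            warning() << "could not load example settings document"
                      << causedBy(settingWith.getStatus());
            return;
        }
        LOG(1) << "example settings document: " << settingWith.getValue();
    }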
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
index 11e03fb4c70..730a411af29 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
@@ -56,17 +56,18 @@ Status ShardingCatalogClientMock::startup() {
return Status::OK();
}
-void ShardingCatalogClientMock::shutDown(OperationContext* txn) {
+void ShardingCatalogClientMock::shutDown(OperationContext* opCtx) {
if (_distLockManager) {
- _distLockManager->shutDown(txn);
+ _distLockManager->shutDown(opCtx);
}
}
-Status ShardingCatalogClientMock::enableSharding(OperationContext* txn, const std::string& dbName) {
+Status ShardingCatalogClientMock::enableSharding(OperationContext* opCtx,
+ const std::string& dbName) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::shardCollection(OperationContext* txn,
+Status ShardingCatalogClientMock::shardCollection(OperationContext* opCtx,
const string& ns,
const ShardKeyPattern& fieldsAndOrder,
const BSONObj& defaultCollation,
@@ -76,51 +77,52 @@ Status ShardingCatalogClientMock::shardCollection(OperationContext* txn,
return {ErrorCodes::InternalError, "Method not implemented"};
}
-StatusWith<ShardDrainingStatus> ShardingCatalogClientMock::removeShard(OperationContext* txn,
+StatusWith<ShardDrainingStatus> ShardingCatalogClientMock::removeShard(OperationContext* opCtx,
const ShardId& name) {
return ShardDrainingStatus::COMPLETED;
}
-Status ShardingCatalogClientMock::updateDatabase(OperationContext* txn,
+Status ShardingCatalogClientMock::updateDatabase(OperationContext* opCtx,
const string& dbName,
const DatabaseType& db) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientMock::getDatabase(
- OperationContext* txn, const string& dbName) {
+ OperationContext* opCtx, const string& dbName) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::updateCollection(OperationContext* txn,
+Status ShardingCatalogClientMock::updateCollection(OperationContext* opCtx,
const string& collNs,
const CollectionType& coll) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientMock::getCollection(
- OperationContext* txn, const string& collNs) {
+ OperationContext* opCtx, const string& collNs) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::getCollections(OperationContext* txn,
+Status ShardingCatalogClientMock::getCollections(OperationContext* opCtx,
const string* dbName,
vector<CollectionType>* collections,
repl::OpTime* optime) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::dropCollection(OperationContext* txn, const NamespaceString& ns) {
+Status ShardingCatalogClientMock::dropCollection(OperationContext* opCtx,
+ const NamespaceString& ns) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::getDatabasesForShard(OperationContext* txn,
+Status ShardingCatalogClientMock::getDatabasesForShard(OperationContext* opCtx,
const ShardId& shardName,
vector<string>* dbs) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::getChunks(OperationContext* txn,
+Status ShardingCatalogClientMock::getChunks(OperationContext* opCtx,
const BSONObj& filter,
const BSONObj& sort,
boost::optional<int> limit,
@@ -130,18 +132,18 @@ Status ShardingCatalogClientMock::getChunks(OperationContext* txn,
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::getTagsForCollection(OperationContext* txn,
+Status ShardingCatalogClientMock::getTagsForCollection(OperationContext* opCtx,
const string& collectionNs,
vector<TagsType>* tags) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> ShardingCatalogClientMock::getAllShards(
- OperationContext* txn, repl::ReadConcernLevel readConcern) {
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-bool ShardingCatalogClientMock::runUserManagementWriteCommand(OperationContext* txn,
+bool ShardingCatalogClientMock::runUserManagementWriteCommand(OperationContext* opCtx,
const string& commandName,
const string& dbname,
const BSONObj& cmdObj,
@@ -149,14 +151,14 @@ bool ShardingCatalogClientMock::runUserManagementWriteCommand(OperationContext*
return true;
}
-bool ShardingCatalogClientMock::runUserManagementReadCommand(OperationContext* txn,
+bool ShardingCatalogClientMock::runUserManagementReadCommand(OperationContext* opCtx,
const string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) {
return true;
}
-Status ShardingCatalogClientMock::applyChunkOpsDeprecated(OperationContext* txn,
+Status ShardingCatalogClientMock::applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
const std::string& nss,
@@ -166,14 +168,14 @@ Status ShardingCatalogClientMock::applyChunkOpsDeprecated(OperationContext* txn,
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::logAction(OperationContext* txn,
+Status ShardingCatalogClientMock::logAction(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::logChange(OperationContext* txn,
+Status ShardingCatalogClientMock::logChange(OperationContext* opCtx,
const string& what,
const string& ns,
const BSONObj& detail,
@@ -181,21 +183,21 @@ Status ShardingCatalogClientMock::logChange(OperationContext* txn,
return {ErrorCodes::InternalError, "Method not implemented"};
}
-StatusWith<BSONObj> ShardingCatalogClientMock::getGlobalSettings(OperationContext* txn,
+StatusWith<BSONObj> ShardingCatalogClientMock::getGlobalSettings(OperationContext* opCtx,
StringData key) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
StatusWith<VersionType> ShardingCatalogClientMock::getConfigVersion(
- OperationContext* txn, repl::ReadConcernLevel readConcern) {
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-void ShardingCatalogClientMock::writeConfigServerDirect(OperationContext* txn,
+void ShardingCatalogClientMock::writeConfigServerDirect(OperationContext* opCtx,
const BatchedCommandRequest& request,
BatchedCommandResponse* response) {}
-Status ShardingCatalogClientMock::insertConfigDocument(OperationContext* txn,
+Status ShardingCatalogClientMock::insertConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) {
@@ -203,7 +205,7 @@ Status ShardingCatalogClientMock::insertConfigDocument(OperationContext* txn,
}
StatusWith<bool> ShardingCatalogClientMock::updateConfigDocument(
- OperationContext* txn,
+ OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const BSONObj& update,
@@ -212,14 +214,15 @@ StatusWith<bool> ShardingCatalogClientMock::updateConfigDocument(
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::removeConfigDocuments(OperationContext* txn,
+Status ShardingCatalogClientMock::removeConfigDocuments(OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const WriteConcernOptions& writeConcern) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
-Status ShardingCatalogClientMock::createDatabase(OperationContext* txn, const std::string& dbName) {
+Status ShardingCatalogClientMock::createDatabase(OperationContext* opCtx,
+ const std::string& dbName) {
return {ErrorCodes::InternalError, "Method not implemented"};
}
@@ -228,7 +231,7 @@ DistLockManager* ShardingCatalogClientMock::getDistLockManager() {
}
Status ShardingCatalogClientMock::appendInfoForConfigServerDatabases(
- OperationContext* txn, const BSONObj& listDatabasesCmd, BSONArrayBuilder* builder) {
+ OperationContext* opCtx, const BSONObj& listDatabasesCmd, BSONArrayBuilder* builder) {
return Status::OK();
}
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h
index 658681fd37a..a2d223f2384 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h
@@ -43,11 +43,11 @@ public:
Status startup() override;
- void shutDown(OperationContext* txn) override;
+ void shutDown(OperationContext* opCtx) override;
- Status enableSharding(OperationContext* txn, const std::string& dbName);
+ Status enableSharding(OperationContext* opCtx, const std::string& dbName);
- Status shardCollection(OperationContext* txn,
+ Status shardCollection(OperationContext* opCtx,
const std::string& ns,
const ShardKeyPattern& fieldsAndOrder,
const BSONObj& defaultCollation,
@@ -55,35 +55,35 @@ public:
const std::vector<BSONObj>& initPoints,
const std::set<ShardId>& initShardIds) override;
- StatusWith<ShardDrainingStatus> removeShard(OperationContext* txn,
+ StatusWith<ShardDrainingStatus> removeShard(OperationContext* opCtx,
const ShardId& name) override;
- Status updateDatabase(OperationContext* txn,
+ Status updateDatabase(OperationContext* opCtx,
const std::string& dbName,
const DatabaseType& db) override;
- StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* txn,
+ StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* opCtx,
const std::string& dbName) override;
- Status updateCollection(OperationContext* txn,
+ Status updateCollection(OperationContext* opCtx,
const std::string& collNs,
const CollectionType& coll) override;
- StatusWith<repl::OpTimeWith<CollectionType>> getCollection(OperationContext* txn,
+ StatusWith<repl::OpTimeWith<CollectionType>> getCollection(OperationContext* opCtx,
const std::string& collNs) override;
- Status getCollections(OperationContext* txn,
+ Status getCollections(OperationContext* opCtx,
const std::string* dbName,
std::vector<CollectionType>* collections,
repl::OpTime* optime) override;
- Status dropCollection(OperationContext* txn, const NamespaceString& ns) override;
+ Status dropCollection(OperationContext* opCtx, const NamespaceString& ns) override;
- Status getDatabasesForShard(OperationContext* txn,
+ Status getDatabasesForShard(OperationContext* opCtx,
const ShardId& shardName,
std::vector<std::string>* dbs) override;
- Status getChunks(OperationContext* txn,
+ Status getChunks(OperationContext* opCtx,
const BSONObj& filter,
const BSONObj& sort,
boost::optional<int> limit,
@@ -91,25 +91,25 @@ public:
repl::OpTime* opTime,
repl::ReadConcernLevel readConcern) override;
- Status getTagsForCollection(OperationContext* txn,
+ Status getTagsForCollection(OperationContext* opCtx,
const std::string& collectionNs,
std::vector<TagsType>* tags) override;
StatusWith<repl::OpTimeWith<std::vector<ShardType>>> getAllShards(
- OperationContext* txn, repl::ReadConcernLevel readConcern) override;
+ OperationContext* opCtx, repl::ReadConcernLevel readConcern) override;
- bool runUserManagementWriteCommand(OperationContext* txn,
+ bool runUserManagementWriteCommand(OperationContext* opCtx,
const std::string& commandName,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) override;
- bool runUserManagementReadCommand(OperationContext* txn,
+ bool runUserManagementReadCommand(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder* result) override;
- Status applyChunkOpsDeprecated(OperationContext* txn,
+ Status applyChunkOpsDeprecated(OperationContext* opCtx,
const BSONArray& updateOps,
const BSONArray& preCondition,
const std::string& nss,
@@ -117,48 +117,48 @@ public:
const WriteConcernOptions& writeConcern,
repl::ReadConcernLevel readConcern) override;
- Status logAction(OperationContext* txn,
+ Status logAction(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail) override;
- Status logChange(OperationContext* txn,
+ Status logChange(OperationContext* opCtx,
const std::string& what,
const std::string& ns,
const BSONObj& detail,
const WriteConcernOptions& writeConcern) override;
- StatusWith<BSONObj> getGlobalSettings(OperationContext* txn, StringData key) override;
+ StatusWith<BSONObj> getGlobalSettings(OperationContext* opCtx, StringData key) override;
- StatusWith<VersionType> getConfigVersion(OperationContext* txn,
+ StatusWith<VersionType> getConfigVersion(OperationContext* opCtx,
repl::ReadConcernLevel readConcern) override;
- void writeConfigServerDirect(OperationContext* txn,
+ void writeConfigServerDirect(OperationContext* opCtx,
const BatchedCommandRequest& request,
BatchedCommandResponse* response) override;
- Status insertConfigDocument(OperationContext* txn,
+ Status insertConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& doc,
const WriteConcernOptions& writeConcern) override;
- StatusWith<bool> updateConfigDocument(OperationContext* txn,
+ StatusWith<bool> updateConfigDocument(OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const BSONObj& update,
bool upsert,
const WriteConcernOptions& writeConcern) override;
- Status removeConfigDocuments(OperationContext* txn,
+ Status removeConfigDocuments(OperationContext* opCtx,
const std::string& ns,
const BSONObj& query,
const WriteConcernOptions& writeConcern) override;
- Status createDatabase(OperationContext* txn, const std::string& dbName);
+ Status createDatabase(OperationContext* opCtx, const std::string& dbName);
DistLockManager* getDistLockManager() override;
- Status appendInfoForConfigServerDatabases(OperationContext* txn,
+ Status appendInfoForConfigServerDatabases(OperationContext* opCtx,
const BSONObj& listDatabasesCmd,
BSONArrayBuilder* builder) override;
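(A sketch, not part of this change, of how a unit test might build on the mock declared above: derive from it and override only the method under test, leaving everything else to the mock's default "Method not implemented" responses. The class name and member are invented.)

    class RecordingCatalogClient : public ShardingCatalogClientMock {
    public:
        Status logChange(OperationContext* opCtx,
                         const std::string& what,
                         const std::string& ns,
                         const BSONObj& detail,
                         const WriteConcernOptions& writeConcern) override {
            lastChangeLogged = what;  // remembered so the test can assert on it
            return Status::OK();
        }

        std::string lastChangeLogged;
    };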
diff --git a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
index 99fdcf0ecab..0ce97c5375e 100644
--- a/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_config_initialization_test.cpp
@@ -220,26 +220,26 @@ TEST_F(ConfigInitializationTest, ReRunsIfDocRolledBackThenReElected) {
});
operationContext()->setReplicatedWrites(false);
replicationCoordinator()->setFollowerMode(repl::MemberState::RS_ROLLBACK);
- auto txn = operationContext();
+ auto opCtx = operationContext();
auto nss = NamespaceString(VersionType::ConfigNS);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- ScopedTransaction transaction(txn, MODE_IX);
- AutoGetCollection autoColl(txn, nss, MODE_IX);
+ ScopedTransaction transaction(opCtx, MODE_IX);
+ AutoGetCollection autoColl(opCtx, nss, MODE_IX);
auto coll = autoColl.getCollection();
ASSERT_TRUE(coll);
- auto cursor = coll->getCursor(txn);
+ auto cursor = coll->getCursor(opCtx);
std::vector<RecordId> recordIds;
while (auto recordId = cursor->next()) {
recordIds.push_back(recordId->id);
}
- mongo::WriteUnitOfWork wuow(txn);
+ mongo::WriteUnitOfWork wuow(opCtx);
for (auto recordId : recordIds) {
- coll->deleteDocument(txn, recordId, nullptr);
+ coll->deleteDocument(opCtx, recordId, nullptr);
}
wuow.commit();
- ASSERT_EQUALS(0UL, coll->numRecords(txn));
+ ASSERT_EQUALS(0UL, coll->numRecords(opCtx));
}
- MONGO_WRITE_CONFLICT_RETRY_LOOP_END(txn, "removeConfigDocuments", nss.ns());
+ MONGO_WRITE_CONFLICT_RETRY_LOOP_END(opCtx, "removeConfigDocuments", nss.ns());
}
// Verify the document was actually removed.
diff --git a/src/mongo/s/catalog/sharding_catalog_manager.h b/src/mongo/s/catalog/sharding_catalog_manager.h
index e3cae76b7e3..f2e041ace84 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager.h
+++ b/src/mongo/s/catalog/sharding_catalog_manager.h
@@ -83,7 +83,7 @@ public:
/**
* Performs necessary cleanup when shutting down cleanly.
*/
- virtual void shutDown(OperationContext* txn) = 0;
+ virtual void shutDown(OperationContext* opCtx) = 0;
/**
*
@@ -98,7 +98,7 @@ public:
* no limitation to space usage.
* @return either an !OK status or the name of the newly added shard.
*/
- virtual StatusWith<std::string> addShard(OperationContext* txn,
+ virtual StatusWith<std::string> addShard(OperationContext* opCtx,
const std::string* shardProposedName,
const ConnectionString& shardConnectionString,
const long long maxSize) = 0;
@@ -107,7 +107,7 @@ public:
* Adds the shard to the zone.
* Returns ErrorCodes::ShardNotFound if the shard does not exist.
*/
- virtual Status addShardToZone(OperationContext* txn,
+ virtual Status addShardToZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) = 0;
@@ -115,7 +115,7 @@ public:
* Removes the shard from the zone.
* Returns ErrorCodes::ShardNotFound if the shard does not exist.
*/
- virtual Status removeShardFromZone(OperationContext* txn,
+ virtual Status removeShardFromZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) = 0;
@@ -124,7 +124,7 @@ public:
* the shard key, the range will be converted into a new range with full shard key filled
* with MinKey values.
*/
- virtual Status assignKeyRangeToZone(OperationContext* txn,
+ virtual Status assignKeyRangeToZone(OperationContext* opCtx,
const NamespaceString& ns,
const ChunkRange& range,
const std::string& zoneName) = 0;
@@ -134,7 +134,7 @@ public:
* Note: unlike assignKeyRangeToZone, the given range will never be converted to include the
* full shard key.
*/
- virtual Status removeKeyRangeFromZone(OperationContext* txn,
+ virtual Status removeKeyRangeFromZone(OperationContext* opCtx,
const NamespaceString& ns,
const ChunkRange& range) = 0;
@@ -142,7 +142,7 @@ public:
* Updates metadata in config.chunks collection to show the given chunk as split
* into smaller chunks at the specified split points.
*/
- virtual Status commitChunkSplit(OperationContext* txn,
+ virtual Status commitChunkSplit(OperationContext* opCtx,
const NamespaceString& ns,
const OID& requestEpoch,
const ChunkRange& range,
@@ -153,7 +153,7 @@ public:
* Updates metadata in config.chunks collection so the chunks with given boundaries are seen
* merged into a single larger chunk.
*/
- virtual Status commitChunkMerge(OperationContext* txn,
+ virtual Status commitChunkMerge(OperationContext* opCtx,
const NamespaceString& ns,
const OID& requestEpoch,
const std::vector<BSONObj>& chunkBoundaries,
@@ -162,7 +162,7 @@ public:
/**
* Updates metadata in config.chunks collection to show the given chunk in its new shard.
*/
- virtual StatusWith<BSONObj> commitChunkMigration(OperationContext* txn,
+ virtual StatusWith<BSONObj> commitChunkMigration(OperationContext* opCtx,
const NamespaceString& nss,
const ChunkType& migratedChunk,
const boost::optional<ChunkType>& controlChunk,
@@ -179,7 +179,7 @@ public:
* Initializes the collections that live in the config server. Mostly this involves building
* necessary indexes and populating the config.version document.
*/
- virtual Status initializeConfigDatabaseIfNeeded(OperationContext* txn) = 0;
+ virtual Status initializeConfigDatabaseIfNeeded(OperationContext* opCtx) = 0;
/**
* Called if the config.version document is rolled back. Indicates to the
@@ -195,13 +195,13 @@ public:
* shardIdentity doc's configsvrConnString if the _id, shardName, and clusterId do not
* conflict).
*/
- virtual BSONObj createShardIdentityUpsertForAddShard(OperationContext* txn,
+ virtual BSONObj createShardIdentityUpsertForAddShard(OperationContext* opCtx,
const std::string& shardName) = 0;
/**
* Runs the setFeatureCompatibilityVersion command on all shards.
*/
- virtual Status setFeatureCompatibilityVersionOnShards(OperationContext* txn,
+ virtual Status setFeatureCompatibilityVersionOnShards(OperationContext* opCtx,
const std::string& version) = 0;
protected:
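(For illustration only: a sketch of a caller handing the renamed opCtx to one of the zone operations declared above. The namespace, range, and zone name are placeholders, and the manager pointer is assumed to have been obtained elsewhere.)

    // Hypothetical helper: assigns an illustrative key range to a zone.
    Status assignExampleRange(OperationContext* opCtx, ShardingCatalogManager* catalogManager) {
        const NamespaceString nss("db.coll");
        const ChunkRange range(BSON("x" << 0), BSON("x" << 100));
        return catalogManager->assignKeyRangeToZone(opCtx, nss, range, "zoneA");
    }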
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
index fe3e427f228..c1edeaad1dd 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_chunk_operations_impl.cpp
@@ -139,13 +139,13 @@ BSONArray buildMergeChunksApplyOpsPrecond(const std::vector<ChunkType>& chunksTo
* has not been dropped and recreated since the migration began, unbeknown to the shard when the
* command was sent.
*/
-Status checkCollectionVersionEpoch(OperationContext* txn,
+Status checkCollectionVersionEpoch(OperationContext* opCtx,
const NamespaceString& nss,
const ChunkType& aChunk,
const OID& collectionEpoch) {
auto findResponseWith =
- Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(ChunkType::ConfigNS),
@@ -192,7 +192,7 @@ Status checkCollectionVersionEpoch(OperationContext* txn,
return Status::OK();
}
-Status checkChunkIsOnShard(OperationContext* txn,
+Status checkChunkIsOnShard(OperationContext* opCtx,
const NamespaceString& nss,
const BSONObj& min,
const BSONObj& max,
@@ -204,8 +204,8 @@ Status checkChunkIsOnShard(OperationContext* txn,
// Must use local read concern because we're going to perform subsequent writes.
auto findResponseWith =
- Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(ChunkType::ConfigNS),
@@ -288,7 +288,7 @@ BSONObj makeCommitChunkApplyOpsCommand(const NamespaceString& nss,
} // namespace
-Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* txn,
+Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* opCtx,
const NamespaceString& ns,
const OID& requestEpoch,
const ChunkRange& range,
@@ -298,11 +298,11 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* txn,
// migrations
// TODO(SERVER-25359): Replace with a collection-specific lock map to allow splits/merges/
// move chunks on different collections to proceed in parallel
- Lock::ExclusiveLock lk(txn->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
// Get the chunk with highest version for this namespace
- auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ auto findStatus = Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(ChunkType::ConfigNS),
@@ -429,8 +429,8 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* txn,
}
// apply the batch of updates to remote and local metadata
- Status applyOpsStatus = Grid::get(txn)->catalogClient(txn)->applyChunkOpsDeprecated(
- txn,
+ Status applyOpsStatus = Grid::get(opCtx)->catalogClient(opCtx)->applyChunkOpsDeprecated(
+ opCtx,
updates.arr(),
preCond.arr(),
ns.ns(),
@@ -454,8 +454,8 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* txn,
appendShortVersion(&logDetail.subobjStart("left"), newChunks[0]);
appendShortVersion(&logDetail.subobjStart("right"), newChunks[1]);
- Grid::get(txn)->catalogClient(txn)->logChange(
- txn, "split", ns.ns(), logDetail.obj(), WriteConcernOptions());
+ Grid::get(opCtx)->catalogClient(opCtx)->logChange(
+ opCtx, "split", ns.ns(), logDetail.obj(), WriteConcernOptions());
} else {
BSONObj beforeDetailObj = logDetail.obj();
BSONObj firstDetailObj = beforeDetailObj.getOwned();
@@ -468,15 +468,15 @@ Status ShardingCatalogManagerImpl::commitChunkSplit(OperationContext* txn,
chunkDetail.append("of", newChunksSize);
appendShortVersion(&chunkDetail.subobjStart("chunk"), newChunks[i]);
- Grid::get(txn)->catalogClient(txn)->logChange(
- txn, "multi-split", ns.ns(), chunkDetail.obj(), WriteConcernOptions());
+ Grid::get(opCtx)->catalogClient(opCtx)->logChange(
+ opCtx, "multi-split", ns.ns(), chunkDetail.obj(), WriteConcernOptions());
}
}
return applyOpsStatus;
}
-Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* txn,
+Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* opCtx,
const NamespaceString& ns,
const OID& requestEpoch,
const std::vector<BSONObj>& chunkBoundaries,
@@ -488,11 +488,11 @@ Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* txn,
// migrations
// TODO(SERVER-25359): Replace with a collection-specific lock map to allow splits/merges/
// move chunks on different collections to proceed in parallel
- Lock::ExclusiveLock lk(txn->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
// Get the chunk with the highest version for this namespace
- auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ auto findStatus = Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(ChunkType::ConfigNS),
@@ -554,8 +554,8 @@ Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* txn,
auto preCond = buildMergeChunksApplyOpsPrecond(chunksToMerge, collVersion);
// apply the batch of updates to remote and local metadata
- Status applyOpsStatus = Grid::get(txn)->catalogClient(txn)->applyChunkOpsDeprecated(
- txn,
+ Status applyOpsStatus = Grid::get(opCtx)->catalogClient(opCtx)->applyChunkOpsDeprecated(
+ opCtx,
updates,
preCond,
ns.ns(),
@@ -577,14 +577,14 @@ Status ShardingCatalogManagerImpl::commitChunkMerge(OperationContext* txn,
collVersion.addToBSON(logDetail, "prevShardVersion");
mergeVersion.addToBSON(logDetail, "mergedVersion");
- Grid::get(txn)->catalogClient(txn)->logChange(
- txn, "merge", ns.ns(), logDetail.obj(), WriteConcernOptions());
+ Grid::get(opCtx)->catalogClient(opCtx)->logChange(
+ opCtx, "merge", ns.ns(), logDetail.obj(), WriteConcernOptions());
return applyOpsStatus;
}
StatusWith<BSONObj> ShardingCatalogManagerImpl::commitChunkMigration(
- OperationContext* txn,
+ OperationContext* opCtx,
const NamespaceString& nss,
const ChunkType& migratedChunk,
const boost::optional<ChunkType>& controlChunk,
@@ -602,11 +602,11 @@ StatusWith<BSONObj> ShardingCatalogManagerImpl::commitChunkMigration(
// TODO(SERVER-25359): Replace with a collection-specific lock map to allow splits/merges/
// move chunks on different collections to proceed in parallel.
// (Note: This is not needed while we have a global lock, taken here only for consistency.)
- Lock::ExclusiveLock lk(txn->lockState(), _kChunkOpLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kChunkOpLock);
// Ensure that the epoch passed in still matches the real state of the database.
- auto epochCheck = checkCollectionVersionEpoch(txn, nss, migratedChunk, collectionEpoch);
+ auto epochCheck = checkCollectionVersionEpoch(opCtx, nss, migratedChunk, collectionEpoch);
if (!epochCheck.isOK()) {
return epochCheck;
}
@@ -614,22 +614,22 @@ StatusWith<BSONObj> ShardingCatalogManagerImpl::commitChunkMigration(
// Check that migratedChunk and controlChunk are where they should be, on fromShard.
auto migratedOnShard =
- checkChunkIsOnShard(txn, nss, migratedChunk.getMin(), migratedChunk.getMax(), fromShard);
+ checkChunkIsOnShard(opCtx, nss, migratedChunk.getMin(), migratedChunk.getMax(), fromShard);
if (!migratedOnShard.isOK()) {
return migratedOnShard;
}
if (controlChunk) {
auto controlOnShard = checkChunkIsOnShard(
- txn, nss, controlChunk->getMin(), controlChunk->getMax(), fromShard);
+ opCtx, nss, controlChunk->getMin(), controlChunk->getMax(), fromShard);
if (!controlOnShard.isOK()) {
return controlOnShard;
}
}
// Must use local read concern because we will perform subsequent writes.
- auto findResponse = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ auto findResponse = Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(ChunkType::ConfigNS),
@@ -671,8 +671,8 @@ StatusWith<BSONObj> ShardingCatalogManagerImpl::commitChunkMigration(
nss, newMigratedChunk, newControlChunk, fromShard.toString(), toShard.toString());
StatusWith<Shard::CommandResponse> applyOpsCommandResponse =
- Grid::get(txn)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
- txn,
+ Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts(
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
nss.db().toString(),
command,
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp
index 6c3e9ac749c..ca2245f540b 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_impl.cpp
@@ -75,7 +75,7 @@ Status ShardingCatalogManagerImpl::startup() {
return Status::OK();
}
-void ShardingCatalogManagerImpl::shutDown(OperationContext* txn) {
+void ShardingCatalogManagerImpl::shutDown(OperationContext* opCtx) {
LOG(1) << "ShardingCatalogManagerImpl::shutDown() called.";
{
stdx::lock_guard<stdx::mutex> lk(_mutex);
@@ -86,7 +86,7 @@ void ShardingCatalogManagerImpl::shutDown(OperationContext* txn) {
_executorForAddShard->join();
}
-Status ShardingCatalogManagerImpl::initializeConfigDatabaseIfNeeded(OperationContext* txn) {
+Status ShardingCatalogManagerImpl::initializeConfigDatabaseIfNeeded(OperationContext* opCtx) {
{
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_configInitialized) {
@@ -95,7 +95,7 @@ Status ShardingCatalogManagerImpl::initializeConfigDatabaseIfNeeded(OperationCon
}
}
- Status status = _initConfigIndexes(txn);
+ Status status = _initConfigIndexes(opCtx);
if (!status.isOK()) {
return status;
}
@@ -103,7 +103,7 @@ Status ShardingCatalogManagerImpl::initializeConfigDatabaseIfNeeded(OperationCon
// Make sure to write config.version last since we detect rollbacks of config.version and
// will re-run initializeConfigDatabaseIfNeeded if that happens, but we don't detect rollback
// of the index builds.
- status = _initConfigVersion(txn);
+ status = _initConfigVersion(opCtx);
if (!status.isOK()) {
return status;
}
@@ -119,11 +119,11 @@ void ShardingCatalogManagerImpl::discardCachedConfigDatabaseInitializationState(
_configInitialized = false;
}
-Status ShardingCatalogManagerImpl::_initConfigVersion(OperationContext* txn) {
- const auto catalogClient = Grid::get(txn)->catalogClient(txn);
+Status ShardingCatalogManagerImpl::_initConfigVersion(OperationContext* opCtx) {
+ const auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
auto versionStatus =
- catalogClient->getConfigVersion(txn, repl::ReadConcernLevel::kLocalReadConcern);
+ catalogClient->getConfigVersion(opCtx, repl::ReadConcernLevel::kLocalReadConcern);
if (!versionStatus.isOK()) {
return versionStatus.getStatus();
}
@@ -144,7 +144,7 @@ Status ShardingCatalogManagerImpl::_initConfigVersion(OperationContext* txn) {
BSONObj versionObj(newVersion.toBSON());
auto insertStatus = catalogClient->insertConfigDocument(
- txn, VersionType::ConfigNS, versionObj, kNoWaitWriteConcern);
+ opCtx, VersionType::ConfigNS, versionObj, kNoWaitWriteConcern);
return insertStatus;
}
@@ -168,12 +168,12 @@ Status ShardingCatalogManagerImpl::_initConfigVersion(OperationContext* txn) {
return Status::OK();
}
-Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
+Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* opCtx) {
const bool unique = true;
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
Status result =
- configShard->createIndexOnConfig(txn,
+ configShard->createIndexOnConfig(opCtx,
NamespaceString(ChunkType::ConfigNS),
BSON(ChunkType::ns() << 1 << ChunkType::min() << 1),
unique);
@@ -184,7 +184,7 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
result = configShard->createIndexOnConfig(
- txn,
+ opCtx,
NamespaceString(ChunkType::ConfigNS),
BSON(ChunkType::ns() << 1 << ChunkType::shard() << 1 << ChunkType::min() << 1),
unique);
@@ -195,7 +195,7 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
result = configShard->createIndexOnConfig(
- txn,
+ opCtx,
NamespaceString(ChunkType::ConfigNS),
BSON(ChunkType::ns() << 1 << ChunkType::DEPRECATED_lastmod() << 1),
unique);
@@ -206,7 +206,7 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
result = configShard->createIndexOnConfig(
- txn,
+ opCtx,
NamespaceString(MigrationType::ConfigNS),
BSON(MigrationType::ns() << 1 << MigrationType::min() << 1),
unique);
@@ -217,7 +217,7 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
result = configShard->createIndexOnConfig(
- txn, NamespaceString(ShardType::ConfigNS), BSON(ShardType::host() << 1), unique);
+ opCtx, NamespaceString(ShardType::ConfigNS), BSON(ShardType::host() << 1), unique);
if (!result.isOK()) {
return Status(result.code(),
str::stream() << "couldn't create host_1 index on config db"
@@ -225,7 +225,7 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
result = configShard->createIndexOnConfig(
- txn, NamespaceString(LocksType::ConfigNS), BSON(LocksType::lockID() << 1), !unique);
+ opCtx, NamespaceString(LocksType::ConfigNS), BSON(LocksType::lockID() << 1), !unique);
if (!result.isOK()) {
return Status(result.code(),
str::stream() << "couldn't create lock id index on config db"
@@ -233,7 +233,7 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
result =
- configShard->createIndexOnConfig(txn,
+ configShard->createIndexOnConfig(opCtx,
NamespaceString(LocksType::ConfigNS),
BSON(LocksType::state() << 1 << LocksType::process() << 1),
!unique);
@@ -244,14 +244,14 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
result = configShard->createIndexOnConfig(
- txn, NamespaceString(LockpingsType::ConfigNS), BSON(LockpingsType::ping() << 1), !unique);
+ opCtx, NamespaceString(LockpingsType::ConfigNS), BSON(LockpingsType::ping() << 1), !unique);
if (!result.isOK()) {
return Status(result.code(),
str::stream() << "couldn't create lockping ping time index on config db"
<< causedBy(result));
}
- result = configShard->createIndexOnConfig(txn,
+ result = configShard->createIndexOnConfig(opCtx,
NamespaceString(TagsType::ConfigNS),
BSON(TagsType::ns() << 1 << TagsType::min() << 1),
unique);
@@ -261,7 +261,7 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
<< causedBy(result));
}
- result = configShard->createIndexOnConfig(txn,
+ result = configShard->createIndexOnConfig(opCtx,
NamespaceString(TagsType::ConfigNS),
BSON(TagsType::ns() << 1 << TagsType::tag() << 1),
!unique);
@@ -275,22 +275,22 @@ Status ShardingCatalogManagerImpl::_initConfigIndexes(OperationContext* txn) {
}
Status ShardingCatalogManagerImpl::setFeatureCompatibilityVersionOnShards(
- OperationContext* txn, const std::string& version) {
+ OperationContext* opCtx, const std::string& version) {
// No shards should be added until we have forwarded featureCompatibilityVersion to all shards.
- Lock::SharedLock lk(txn->lockState(), _kShardMembershipLock);
+ Lock::SharedLock lk(opCtx->lockState(), _kShardMembershipLock);
std::vector<ShardId> shardIds;
- Grid::get(txn)->shardRegistry()->getAllShardIds(&shardIds);
+ Grid::get(opCtx)->shardRegistry()->getAllShardIds(&shardIds);
for (const ShardId& shardId : shardIds) {
- const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ const auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
continue;
}
const auto shard = shardStatus.getValue();
auto response = shard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
BSON(FeatureCompatibilityVersion::kCommandName << version),
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_impl.h b/src/mongo/s/catalog/sharding_catalog_manager_impl.h
index 2b4cdd818a8..37207f81383 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_manager_impl.h
@@ -52,43 +52,43 @@ public:
*/
Status startup() override;
- void shutDown(OperationContext* txn) override;
+ void shutDown(OperationContext* opCtx) override;
- Status initializeConfigDatabaseIfNeeded(OperationContext* txn) override;
+ Status initializeConfigDatabaseIfNeeded(OperationContext* opCtx) override;
void discardCachedConfigDatabaseInitializationState() override;
- Status addShardToZone(OperationContext* txn,
+ Status addShardToZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) override;
- Status removeShardFromZone(OperationContext* txn,
+ Status removeShardFromZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) override;
- Status assignKeyRangeToZone(OperationContext* txn,
+ Status assignKeyRangeToZone(OperationContext* opCtx,
const NamespaceString& ns,
const ChunkRange& range,
const std::string& zoneName) override;
- Status removeKeyRangeFromZone(OperationContext* txn,
+ Status removeKeyRangeFromZone(OperationContext* opCtx,
const NamespaceString& ns,
const ChunkRange& range) override;
- Status commitChunkSplit(OperationContext* txn,
+ Status commitChunkSplit(OperationContext* opCtx,
const NamespaceString& ns,
const OID& requestEpoch,
const ChunkRange& range,
const std::vector<BSONObj>& splitPoints,
const std::string& shardName) override;
- Status commitChunkMerge(OperationContext* txn,
+ Status commitChunkMerge(OperationContext* opCtx,
const NamespaceString& ns,
const OID& requestEpoch,
const std::vector<BSONObj>& chunkBoundaries,
const std::string& shardName) override;
- StatusWith<BSONObj> commitChunkMigration(OperationContext* txn,
+ StatusWith<BSONObj> commitChunkMigration(OperationContext* opCtx,
const NamespaceString& nss,
const ChunkType& migratedChunk,
const boost::optional<ChunkType>& controlChunk,
@@ -98,15 +98,15 @@ public:
void appendConnectionStats(executor::ConnectionPoolStats* stats) override;
- StatusWith<std::string> addShard(OperationContext* txn,
+ StatusWith<std::string> addShard(OperationContext* opCtx,
const std::string* shardProposedName,
const ConnectionString& shardConnectionString,
const long long maxSize) override;
- BSONObj createShardIdentityUpsertForAddShard(OperationContext* txn,
+ BSONObj createShardIdentityUpsertForAddShard(OperationContext* opCtx,
const std::string& shardName) override;
- Status setFeatureCompatibilityVersionOnShards(OperationContext* txn,
+ Status setFeatureCompatibilityVersionOnShards(OperationContext* opCtx,
const std::string& version) override;
private:
@@ -114,12 +114,12 @@ private:
* Performs the necessary checks for version compatibility and creates a new config.version
* document if the current cluster config is empty.
*/
- Status _initConfigVersion(OperationContext* txn);
+ Status _initConfigVersion(OperationContext* opCtx);
/**
* Builds all the expected indexes on the config server.
*/
- Status _initConfigIndexes(OperationContext* txn);
+ Status _initConfigIndexes(OperationContext* opCtx);
/**
* Used during addShard to determine if there is already an existing shard that matches the
@@ -132,7 +132,7 @@ private:
* options, so the addShard attempt must be aborted.
*/
StatusWith<boost::optional<ShardType>> _checkIfShardExists(
- OperationContext* txn,
+ OperationContext* opCtx,
const ConnectionString& propsedShardConnectionString,
const std::string* shardProposedName,
long long maxSize);
@@ -153,7 +153,7 @@ private:
* shard's name should be checked and if empty, one should be generated using some uniform
* algorithm.
*/
- StatusWith<ShardType> _validateHostAsShard(OperationContext* txn,
+ StatusWith<ShardType> _validateHostAsShard(OperationContext* opCtx,
std::shared_ptr<RemoteCommandTargeter> targeter,
const std::string* shardProposedName,
const ConnectionString& connectionString);
@@ -164,13 +164,13 @@ private:
* purposes.
*/
StatusWith<std::vector<std::string>> _getDBNamesListFromShard(
- OperationContext* txn, std::shared_ptr<RemoteCommandTargeter> targeter);
+ OperationContext* opCtx, std::shared_ptr<RemoteCommandTargeter> targeter);
/**
* Runs a command against a "shard" that is not yet in the cluster and thus not present in the
* ShardRegistry.
*/
- StatusWith<Shard::CommandResponse> _runCommandForAddShard(OperationContext* txn,
+ StatusWith<Shard::CommandResponse> _runCommandForAddShard(OperationContext* opCtx,
RemoteCommandTargeter* targeter,
const std::string& dbName,
const BSONObj& cmdObj);
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
index b65ba11f531..82b006780b3 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_shard_operations_impl.cpp
@@ -87,12 +87,12 @@ MONGO_FP_DECLARE(dontUpsertShardIdentityOnNewShards);
/**
* Generates a unique name to be given to a newly added shard.
*/
-StatusWith<std::string> generateNewShardName(OperationContext* txn) {
+StatusWith<std::string> generateNewShardName(OperationContext* opCtx) {
BSONObjBuilder shardNameRegex;
shardNameRegex.appendRegex(ShardType::name(), "^shard");
- auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
- txn,
+ auto findStatus = Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
+ opCtx,
kConfigReadSelector,
repl::ReadConcernLevel::kMajorityReadConcern,
NamespaceString(ShardType::ConfigNS),
@@ -130,11 +130,11 @@ StatusWith<std::string> generateNewShardName(OperationContext* txn) {
} // namespace
StatusWith<Shard::CommandResponse> ShardingCatalogManagerImpl::_runCommandForAddShard(
- OperationContext* txn,
+ OperationContext* opCtx,
RemoteCommandTargeter* targeter,
const std::string& dbName,
const BSONObj& cmdObj) {
- auto host = targeter->findHost(txn, ReadPreferenceSetting{ReadPreference::PrimaryOnly});
+ auto host = targeter->findHost(opCtx, ReadPreferenceSetting{ReadPreference::PrimaryOnly});
if (!host.isOK()) {
return host.getStatus();
}
@@ -198,13 +198,13 @@ StatusWith<Shard::CommandResponse> ShardingCatalogManagerImpl::_runCommandForAdd
}
StatusWith<boost::optional<ShardType>> ShardingCatalogManagerImpl::_checkIfShardExists(
- OperationContext* txn,
+ OperationContext* opCtx,
const ConnectionString& proposedShardConnectionString,
const std::string* proposedShardName,
long long proposedShardMaxSize) {
// Check whether any host in the connection is already part of the cluster.
- const auto existingShards = Grid::get(txn)->catalogClient(txn)->getAllShards(
- txn, repl::ReadConcernLevel::kLocalReadConcern);
+ const auto existingShards = Grid::get(opCtx)->catalogClient(opCtx)->getAllShards(
+ opCtx, repl::ReadConcernLevel::kLocalReadConcern);
if (!existingShards.isOK()) {
return Status(existingShards.getStatus().code(),
str::stream() << "Failed to load existing shards during addShard"
@@ -293,7 +293,7 @@ StatusWith<boost::optional<ShardType>> ShardingCatalogManagerImpl::_checkIfShard
}
StatusWith<ShardType> ShardingCatalogManagerImpl::_validateHostAsShard(
- OperationContext* txn,
+ OperationContext* opCtx,
std::shared_ptr<RemoteCommandTargeter> targeter,
const std::string* shardProposedName,
const ConnectionString& connectionString) {
@@ -301,7 +301,7 @@ StatusWith<ShardType> ShardingCatalogManagerImpl::_validateHostAsShard(
// Check if the node being added is a mongos or a version of mongod too old to speak the current
// communication protocol.
auto swCommandResponse =
- _runCommandForAddShard(txn, targeter.get(), "admin", BSON("isMaster" << 1));
+ _runCommandForAddShard(opCtx, targeter.get(), "admin", BSON("isMaster" << 1));
if (!swCommandResponse.isOK()) {
if (swCommandResponse.getStatus() == ErrorCodes::RPCProtocolNegotiationFailed) {
// Mongos to mongos commands are no longer supported in the wire protocol
@@ -479,10 +479,10 @@ StatusWith<ShardType> ShardingCatalogManagerImpl::_validateHostAsShard(
}
StatusWith<std::vector<std::string>> ShardingCatalogManagerImpl::_getDBNamesListFromShard(
- OperationContext* txn, std::shared_ptr<RemoteCommandTargeter> targeter) {
+ OperationContext* opCtx, std::shared_ptr<RemoteCommandTargeter> targeter) {
auto swCommandResponse =
- _runCommandForAddShard(txn, targeter.get(), "admin", BSON("listDatabases" << 1));
+ _runCommandForAddShard(opCtx, targeter.get(), "admin", BSON("listDatabases" << 1));
if (!swCommandResponse.isOK()) {
return swCommandResponse.getStatus();
}
@@ -509,7 +509,7 @@ StatusWith<std::vector<std::string>> ShardingCatalogManagerImpl::_getDBNamesList
}
StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
- OperationContext* txn,
+ OperationContext* opCtx,
const std::string* shardProposedName,
const ConnectionString& shardConnectionString,
const long long maxSize) {
@@ -522,12 +522,12 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
}
// Only one addShard operation can be in progress at a time.
- Lock::ExclusiveLock lk(txn->lockState(), _kShardMembershipLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kShardMembershipLock);
// Check if this shard has already been added (can happen in the case of a retry after a network
// error, for example) and thus this addShard request should be considered a no-op.
auto existingShard =
- _checkIfShardExists(txn, shardConnectionString, shardProposedName, maxSize);
+ _checkIfShardExists(opCtx, shardConnectionString, shardProposedName, maxSize);
if (!existingShard.isOK()) {
return existingShard.getStatus();
}
@@ -536,7 +536,7 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
// addShard request. Make sure to set the last optime for the client to the system last
// optime so that we'll still wait for replication so that this state is visible in the
// committed snapshot.
- repl::ReplClientInfo::forClient(txn->getClient()).setLastOpToSystemLastOpTime(txn);
+ repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
return existingShard.getValue()->getName();
}
@@ -547,15 +547,15 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
// Note: This is necessary because as of 3.4, removeShard is performed by mongos (unlike
// addShard), so the ShardRegistry is not synchronously reloaded on the config server when a
// shard is removed.
- if (!Grid::get(txn)->shardRegistry()->reload(txn)) {
+ if (!Grid::get(opCtx)->shardRegistry()->reload(opCtx)) {
// If the first reload joined an existing one, call reload again to ensure the reload is
// fresh.
- Grid::get(txn)->shardRegistry()->reload(txn);
+ Grid::get(opCtx)->shardRegistry()->reload(opCtx);
}
// TODO: Don't create a detached Shard object, create a detached RemoteCommandTargeter instead.
const std::shared_ptr<Shard> shard{
- Grid::get(txn)->shardRegistry()->createConnection(shardConnectionString)};
+ Grid::get(opCtx)->shardRegistry()->createConnection(shardConnectionString)};
invariant(shard);
auto targeter = shard->getTargeter();
@@ -571,20 +571,20 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
// Validate the specified connection string may serve as shard at all
auto shardStatus =
- _validateHostAsShard(txn, targeter, shardProposedName, shardConnectionString);
+ _validateHostAsShard(opCtx, targeter, shardProposedName, shardConnectionString);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
ShardType& shardType = shardStatus.getValue();
// Check that none of the existing shard candidate's dbs exist already
- auto dbNamesStatus = _getDBNamesListFromShard(txn, targeter);
+ auto dbNamesStatus = _getDBNamesListFromShard(opCtx, targeter);
if (!dbNamesStatus.isOK()) {
return dbNamesStatus.getStatus();
}
for (const auto& dbName : dbNamesStatus.getValue()) {
- auto dbt = Grid::get(txn)->catalogClient(txn)->getDatabase(txn, dbName);
+ auto dbt = Grid::get(opCtx)->catalogClient(opCtx)->getDatabase(opCtx, dbName);
if (dbt.isOK()) {
const auto& dbDoc = dbt.getValue().value;
return Status(ErrorCodes::OperationFailed,
@@ -603,7 +603,7 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
// If a name for a shard wasn't provided, generate one
if (shardType.getName().empty()) {
- auto result = generateNewShardName(txn);
+ auto result = generateNewShardName(opCtx);
if (!result.isOK()) {
return result.getStatus();
}
@@ -619,7 +619,7 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
if (serverGlobalParams.featureCompatibility.version.load() ==
ServerGlobalParams::FeatureCompatibility::Version::k34) {
auto versionResponse =
- _runCommandForAddShard(txn,
+ _runCommandForAddShard(opCtx,
targeter.get(),
"admin",
BSON(FeatureCompatibilityVersion::kCommandName
@@ -640,12 +640,12 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
}
if (!MONGO_FAIL_POINT(dontUpsertShardIdentityOnNewShards)) {
- auto commandRequest = createShardIdentityUpsertForAddShard(txn, shardType.getName());
+ auto commandRequest = createShardIdentityUpsertForAddShard(opCtx, shardType.getName());
LOG(2) << "going to insert shardIdentity document into shard: " << shardType;
auto swCommandResponse =
- _runCommandForAddShard(txn, targeter.get(), "admin", commandRequest);
+ _runCommandForAddShard(opCtx, targeter.get(), "admin", commandRequest);
if (!swCommandResponse.isOK()) {
return swCommandResponse.getStatus();
}
@@ -662,8 +662,11 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
log() << "going to insert new entry for shard into config.shards: " << shardType.toString();
- Status result = Grid::get(txn)->catalogClient(txn)->insertConfigDocument(
- txn, ShardType::ConfigNS, shardType.toBSON(), ShardingCatalogClient::kMajorityWriteConcern);
+ Status result = Grid::get(opCtx)->catalogClient(opCtx)->insertConfigDocument(
+ opCtx,
+ ShardType::ConfigNS,
+ shardType.toBSON(),
+ ShardingCatalogClient::kMajorityWriteConcern);
if (!result.isOK()) {
log() << "error adding shard: " << shardType.toBSON() << " err: " << result.reason();
return result;
@@ -676,7 +679,7 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
dbt.setPrimary(shardType.getName());
dbt.setSharded(false);
- Status status = Grid::get(txn)->catalogClient(txn)->updateDatabase(txn, dbName, dbt);
+ Status status = Grid::get(opCtx)->catalogClient(opCtx)->updateDatabase(opCtx, dbName, dbt);
if (!status.isOK()) {
log() << "adding shard " << shardConnectionString.toString()
<< " even though could not add database " << dbName;
@@ -688,12 +691,12 @@ StatusWith<std::string> ShardingCatalogManagerImpl::addShard(
shardDetails.append("name", shardType.getName());
shardDetails.append("host", shardConnectionString.toString());
- Grid::get(txn)->catalogClient(txn)->logChange(
- txn, "addShard", "", shardDetails.obj(), ShardingCatalogClient::kMajorityWriteConcern);
+ Grid::get(opCtx)->catalogClient(opCtx)->logChange(
+ opCtx, "addShard", "", shardDetails.obj(), ShardingCatalogClient::kMajorityWriteConcern);
// Ensure the added shard is visible to this process.
- auto shardRegistry = Grid::get(txn)->shardRegistry();
- if (!shardRegistry->getShard(txn, shardType.getName()).isOK()) {
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
+ if (!shardRegistry->getShard(opCtx, shardType.getName()).isOK()) {
return {ErrorCodes::OperationFailed,
"Could not find shard metadata for shard after adding it. This most likely "
"indicates that the shard was removed immediately after it was added."};
@@ -708,13 +711,13 @@ void ShardingCatalogManagerImpl::appendConnectionStats(executor::ConnectionPoolS
}
BSONObj ShardingCatalogManagerImpl::createShardIdentityUpsertForAddShard(
- OperationContext* txn, const std::string& shardName) {
+ OperationContext* opCtx, const std::string& shardName) {
std::unique_ptr<BatchedUpdateDocument> updateDoc(new BatchedUpdateDocument());
BSONObjBuilder query;
query.append("_id", "shardIdentity");
query.append(ShardIdentityType::shardName(), shardName);
- query.append(ShardIdentityType::clusterId(), ClusterIdentityLoader::get(txn)->getClusterId());
+ query.append(ShardIdentityType::clusterId(), ClusterIdentityLoader::get(opCtx)->getClusterId());
updateDoc->setQuery(query.obj());
BSONObjBuilder update;
@@ -722,7 +725,7 @@ BSONObj ShardingCatalogManagerImpl::createShardIdentityUpsertForAddShard(
BSONObjBuilder set(update.subobjStart("$set"));
set.append(
ShardIdentityType::configsvrConnString(),
- repl::ReplicationCoordinator::get(txn)->getConfig().getConnectionString().toString());
+ repl::ReplicationCoordinator::get(opCtx)->getConfig().getConnectionString().toString());
}
updateDoc->setUpdateExpr(update.obj());
updateDoc->setUpsert(true);
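
The addShard changes above preserve two behaviours worth noting while renaming txn to opCtx: the whole operation runs under an exclusive _kShardMembershipLock so only one addShard is in flight at a time, and a request naming an already-registered shard is answered as a successful no-op so retries after network errors stay idempotent. Below is a minimal standalone sketch of that lock-plus-idempotency shape; ClusterMembership, ShardEntry, registerShard and _generateName are hypothetical stand-ins for illustration, not the real OperationContext/Grid machinery.

    #include <map>
    #include <mutex>
    #include <optional>
    #include <string>

    struct ShardEntry {
        std::string name;
        std::string connString;
    };

    class ClusterMembership {
    public:
        // Registers a shard, or returns the existing entry's name when an identical
        // registration already happened (e.g. a retry after a network error).
        std::string registerShard(const std::string& proposedName, const std::string& connString) {
            // Only one membership change may be in progress at a time.
            std::lock_guard<std::mutex> lk(_membershipLock);

            if (auto existing = _findByConnString(connString)) {
                return existing->name;  // No-op: this shard is already part of the cluster.
            }

            ShardEntry entry{proposedName.empty() ? _generateName() : proposedName, connString};
            _shards.emplace(entry.name, entry);
            return entry.name;
        }

    private:
        std::optional<ShardEntry> _findByConnString(const std::string& connString) const {
            for (const auto& kv : _shards) {
                if (kv.second.connString == connString)
                    return kv.second;
            }
            return std::nullopt;
        }

        std::string _generateName() const {
            return "shard" + std::to_string(_shards.size() + 1);
        }

        std::mutex _membershipLock;
        std::map<std::string, ShardEntry> _shards;
    };

The same caller-visible contract holds in the diff above: a repeated addShard returns the existing shard's name after waiting for the prior write to become visible in the committed snapshot.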
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp b/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp
index 6af584d3c30..b3e57ce1a0b 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_zone_operations_impl.cpp
@@ -58,7 +58,7 @@ const WriteConcernOptions kNoWaitWriteConcern(1, WriteConcernOptions::SyncMode::
* Note: range should have the full shard key.
 * Returns ErrorCodes::RangeOverlapConflict if an overlap is detected.
*/
-Status checkForOveralappedZonedKeyRange(OperationContext* txn,
+Status checkForOveralappedZonedKeyRange(OperationContext* opCtx,
Shard* configServer,
const NamespaceString& ns,
const ChunkRange& range,
@@ -66,7 +66,7 @@ Status checkForOveralappedZonedKeyRange(OperationContext* txn,
const KeyPattern& shardKeyPattern) {
DistributionStatus chunkDist(ns, ShardToChunksMap{});
- auto tagStatus = configServer->exhaustiveFindOnConfig(txn,
+ auto tagStatus = configServer->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(TagsType::ConfigNS),
@@ -112,13 +112,13 @@ Status checkForOveralappedZonedKeyRange(OperationContext* txn,
* - ErrorCodes::ShardKeyNotFound if range is not compatible (for example, not a prefix of shard
* key) with the shard key of ns.
*/
-StatusWith<ChunkRange> includeFullShardKey(OperationContext* txn,
+StatusWith<ChunkRange> includeFullShardKey(OperationContext* opCtx,
Shard* configServer,
const NamespaceString& ns,
const ChunkRange& range,
KeyPattern* shardKeyPatternOut) {
auto findCollStatus =
- configServer->exhaustiveFindOnConfig(txn,
+ configServer->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(CollectionType::ConfigNS),
@@ -172,13 +172,13 @@ StatusWith<ChunkRange> includeFullShardKey(OperationContext* txn,
} // namespace
-Status ShardingCatalogManagerImpl::addShardToZone(OperationContext* txn,
+Status ShardingCatalogManagerImpl::addShardToZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) {
- Lock::ExclusiveLock lk(txn->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
- auto updateStatus = Grid::get(txn)->catalogClient(txn)->updateConfigDocument(
- txn,
+ auto updateStatus = Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+ opCtx,
ShardType::ConfigNS,
BSON(ShardType::name(shardName)),
BSON("$addToSet" << BSON(ShardType::tags() << zoneName)),
@@ -197,12 +197,12 @@ Status ShardingCatalogManagerImpl::addShardToZone(OperationContext* txn,
return Status::OK();
}
-Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* txn,
+Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* opCtx,
const std::string& shardName,
const std::string& zoneName) {
- Lock::ExclusiveLock lk(txn->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
const NamespaceString shardNS(ShardType::ConfigNS);
//
@@ -210,7 +210,7 @@ Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* txn,
//
auto findShardExistsStatus =
- configShard->exhaustiveFindOnConfig(txn,
+ configShard->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
shardNS,
@@ -232,7 +232,7 @@ Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* txn,
//
auto findShardStatus =
- configShard->exhaustiveFindOnConfig(txn,
+ configShard->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
shardNS,
@@ -265,7 +265,7 @@ Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* txn,
}
auto findChunkRangeStatus =
- configShard->exhaustiveFindOnConfig(txn,
+ configShard->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(TagsType::ConfigNS),
@@ -287,8 +287,8 @@ Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* txn,
// Perform update.
//
- auto updateStatus = Grid::get(txn)->catalogClient(txn)->updateConfigDocument(
- txn,
+ auto updateStatus = Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+ opCtx,
ShardType::ConfigNS,
BSON(ShardType::name(shardName)),
BSON("$pull" << BSON(ShardType::tags() << zoneName)),
@@ -309,17 +309,17 @@ Status ShardingCatalogManagerImpl::removeShardFromZone(OperationContext* txn,
}
-Status ShardingCatalogManagerImpl::assignKeyRangeToZone(OperationContext* txn,
+Status ShardingCatalogManagerImpl::assignKeyRangeToZone(OperationContext* opCtx,
const NamespaceString& ns,
const ChunkRange& givenRange,
const std::string& zoneName) {
- Lock::ExclusiveLock lk(txn->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
- auto configServer = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configServer = Grid::get(opCtx)->shardRegistry()->getConfigShard();
KeyPattern shardKeyPattern{BSONObj()};
auto fullShardKeyStatus =
- includeFullShardKey(txn, configServer.get(), ns, givenRange, &shardKeyPattern);
+ includeFullShardKey(opCtx, configServer.get(), ns, givenRange, &shardKeyPattern);
if (!fullShardKeyStatus.isOK()) {
return fullShardKeyStatus.getStatus();
}
@@ -327,7 +327,7 @@ Status ShardingCatalogManagerImpl::assignKeyRangeToZone(OperationContext* txn,
const auto& fullShardKeyRange = fullShardKeyStatus.getValue();
auto zoneExistStatus =
- configServer->exhaustiveFindOnConfig(txn,
+ configServer->exhaustiveFindOnConfig(opCtx,
kConfigPrimarySelector,
repl::ReadConcernLevel::kLocalReadConcern,
NamespaceString(ShardType::ConfigNS),
@@ -346,7 +346,7 @@ Status ShardingCatalogManagerImpl::assignKeyRangeToZone(OperationContext* txn,
}
auto overlapStatus = checkForOveralappedZonedKeyRange(
- txn, configServer.get(), ns, fullShardKeyRange, zoneName, shardKeyPattern);
+ opCtx, configServer.get(), ns, fullShardKeyRange, zoneName, shardKeyPattern);
if (!overlapStatus.isOK()) {
return overlapStatus;
}
@@ -362,8 +362,8 @@ Status ShardingCatalogManagerImpl::assignKeyRangeToZone(OperationContext* txn,
updateBuilder.append(TagsType::max(), fullShardKeyRange.getMax());
updateBuilder.append(TagsType::tag(), zoneName);
- auto updateStatus = Grid::get(txn)->catalogClient(txn)->updateConfigDocument(
- txn, TagsType::ConfigNS, updateQuery, updateBuilder.obj(), true, kNoWaitWriteConcern);
+ auto updateStatus = Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+ opCtx, TagsType::ConfigNS, updateQuery, updateBuilder.obj(), true, kNoWaitWriteConcern);
if (!updateStatus.isOK()) {
return updateStatus.getStatus();
@@ -372,16 +372,16 @@ Status ShardingCatalogManagerImpl::assignKeyRangeToZone(OperationContext* txn,
return Status::OK();
}
-Status ShardingCatalogManagerImpl::removeKeyRangeFromZone(OperationContext* txn,
+Status ShardingCatalogManagerImpl::removeKeyRangeFromZone(OperationContext* opCtx,
const NamespaceString& ns,
const ChunkRange& range) {
- Lock::ExclusiveLock lk(txn->lockState(), _kZoneOpLock);
+ Lock::ExclusiveLock lk(opCtx->lockState(), _kZoneOpLock);
- auto configServer = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configServer = Grid::get(opCtx)->shardRegistry()->getConfigShard();
KeyPattern shardKeyPattern{BSONObj()};
auto fullShardKeyStatus =
- includeFullShardKey(txn, configServer.get(), ns, range, &shardKeyPattern);
+ includeFullShardKey(opCtx, configServer.get(), ns, range, &shardKeyPattern);
if (!fullShardKeyStatus.isOK()) {
return fullShardKeyStatus.getStatus();
}
@@ -390,8 +390,8 @@ Status ShardingCatalogManagerImpl::removeKeyRangeFromZone(OperationContext* txn,
removeBuilder.append("_id", BSON(TagsType::ns(ns.ns()) << TagsType::min(range.getMin())));
removeBuilder.append(TagsType::max(), range.getMax());
- return Grid::get(txn)->catalogClient(txn)->removeConfigDocuments(
- txn, TagsType::ConfigNS, removeBuilder.obj(), kNoWaitWriteConcern);
+ return Grid::get(opCtx)->catalogClient(opCtx)->removeConfigDocuments(
+ opCtx, TagsType::ConfigNS, removeBuilder.obj(), kNoWaitWriteConcern);
}
} // namespace mongo
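
The zone operations above gate assignKeyRangeToZone on checkForOveralappedZonedKeyRange, which rejects a proposed range that intersects any existing tagged range for the namespace. A minimal standalone sketch of that half-open range overlap test follows, assuming integer keys and a plain std::map in place of the config.tags documents and shard-key comparison.

    #include <map>

    // Existing ranges, keyed by inclusive min with exclusive max as the value: [min, max).
    using RangeMap = std::map<long long, long long>;

    // Returns true if the proposed range [min, max) overlaps any range already in the map.
    bool overlapsExistingRange(const RangeMap& ranges, long long min, long long max) {
        // First candidate: the existing range that starts at or after the new min.
        auto it = ranges.lower_bound(min);
        if (it != ranges.end() && it->first < max) {
            return true;  // An existing range starts inside the new range.
        }
        // Second candidate: the existing range that starts before the new min.
        if (it != ranges.begin()) {
            --it;
            if (it->second > min) {
                return true;  // The previous range extends into the new range.
            }
        }
        return false;
    }

Only two neighbours of the insertion point need to be checked because the ranges in the map are non-overlapping by construction, which is the same invariant the zone code maintains for config.tags.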
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index b8cec8396de..d2c8eaf5504 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -45,7 +45,7 @@ CatalogCache::CatalogCache() = default;
CatalogCache::~CatalogCache() = default;
-StatusWith<std::shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext* txn,
+StatusWith<std::shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext* opCtx,
StringData dbName) {
stdx::lock_guard<stdx::mutex> guard(_mutex);
@@ -55,7 +55,7 @@ StatusWith<std::shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext
}
// Need to load from the store
- auto status = Grid::get(txn)->catalogClient(txn)->getDatabase(txn, dbName.toString());
+ auto status = Grid::get(opCtx)->catalogClient(opCtx)->getDatabase(opCtx, dbName.toString());
if (!status.isOK()) {
return status.getStatus();
}
@@ -63,7 +63,7 @@ StatusWith<std::shared_ptr<DBConfig>> CatalogCache::getDatabase(OperationContext
const auto& dbOpTimePair = status.getValue();
auto db = std::make_shared<DBConfig>(dbOpTimePair.value, dbOpTimePair.opTime);
try {
- db->load(txn);
+ db->load(opCtx);
auto emplaceResult = _databases.try_emplace(dbName, std::move(db));
return emplaceResult.first->second;
} catch (const DBException& ex) {
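
CatalogCache::getDatabase above is a load-on-miss cache: take the mutex, return the cached DBConfig on a hit, otherwise fetch from the catalog client, load it, and insert it with try_emplace. A minimal standalone sketch of the same shape, with a hypothetical DbCache and an injected loader standing in for the catalog client, not the actual MongoDB types:

    #include <functional>
    #include <map>
    #include <memory>
    #include <mutex>
    #include <string>

    struct DbEntry {
        std::string name;
        // Routing metadata would live here in the real DBConfig.
    };

    class DbCache {
    public:
        explicit DbCache(std::function<std::shared_ptr<DbEntry>(const std::string&)> loader)
            : _loader(std::move(loader)) {}

        std::shared_ptr<DbEntry> getDatabase(const std::string& dbName) {
            std::lock_guard<std::mutex> guard(_mutex);

            auto it = _databases.find(dbName);
            if (it != _databases.end()) {
                return it->second;  // Cache hit.
            }

            // Cache miss: load from the backing store, then insert. try_emplace
            // leaves an existing entry untouched if one is already present.
            auto db = _loader(dbName);
            auto emplaceResult = _databases.try_emplace(dbName, std::move(db));
            return emplaceResult.first->second;
        }

    private:
        std::function<std::shared_ptr<DbEntry>(const std::string&)> _loader;
        std::mutex _mutex;
        std::map<std::string, std::shared_ptr<DbEntry>> _databases;
    };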
diff --git a/src/mongo/s/catalog_cache.h b/src/mongo/s/catalog_cache.h
index 8d30c1aebf0..0e63f94b52a 100644
--- a/src/mongo/s/catalog_cache.h
+++ b/src/mongo/s/catalog_cache.h
@@ -62,7 +62,7 @@ public:
*
* Returns the database cache entry if the database exists or a failed status otherwise.
*/
- StatusWith<std::shared_ptr<DBConfig>> getDatabase(OperationContext* txn, StringData dbName);
+ StatusWith<std::shared_ptr<DBConfig>> getDatabase(OperationContext* opCtx, StringData dbName);
/**
* Removes the database information for the specified name from the cache, so that the
diff --git a/src/mongo/s/chunk_diff.cpp b/src/mongo/s/chunk_diff.cpp
index 90ec41b8923..f21555043ad 100644
--- a/src/mongo/s/chunk_diff.cpp
+++ b/src/mongo/s/chunk_diff.cpp
@@ -90,7 +90,7 @@ typename ConfigDiffTracker<ValType>::RangeOverlap ConfigDiffTracker<ValType>::_o
}
template <class ValType>
-int ConfigDiffTracker<ValType>::calculateConfigDiff(OperationContext* txn,
+int ConfigDiffTracker<ValType>::calculateConfigDiff(OperationContext* opCtx,
const std::vector<ChunkType>& chunks) {
// Apply the chunk changes to the ranges and versions
//
@@ -129,7 +129,7 @@ int ConfigDiffTracker<ValType>::calculateConfigDiff(OperationContext* txn,
}
// Chunk version changes
- ShardId shard = shardFor(txn, chunk.getShard());
+ ShardId shard = shardFor(opCtx, chunk.getShard());
typename MaxChunkVersionMap::const_iterator shardVersionIt = _maxShardVersions->find(shard);
if (shardVersionIt == _maxShardVersions->end() || shardVersionIt->second < chunkVersion) {
@@ -164,7 +164,7 @@ int ConfigDiffTracker<ValType>::calculateConfigDiff(OperationContext* txn,
return -1;
}
- _currMap->insert(rangeFor(txn, chunk));
+ _currMap->insert(rangeFor(opCtx, chunk));
}
return _validDiffs;
diff --git a/src/mongo/s/chunk_diff.h b/src/mongo/s/chunk_diff.h
index 9ea6ed5b62e..0cea9fa678a 100644
--- a/src/mongo/s/chunk_diff.h
+++ b/src/mongo/s/chunk_diff.h
@@ -103,7 +103,7 @@ public:
// Applies changes to the config data from a vector of chunks passed in. Also includes minor
// version changes for particular major-version chunks if explicitly specified.
// Returns the number of diffs processed, or -1 if the diffs were inconsistent.
- int calculateConfigDiff(OperationContext* txn, const std::vector<ChunkType>& chunks);
+ int calculateConfigDiff(OperationContext* opCtx, const std::vector<ChunkType>& chunks);
protected:
/**
@@ -119,10 +119,10 @@ protected:
return true;
}
- virtual std::pair<BSONObj, ValType> rangeFor(OperationContext* txn,
+ virtual std::pair<BSONObj, ValType> rangeFor(OperationContext* opCtx,
const ChunkType& chunk) const = 0;
- virtual ShardId shardFor(OperationContext* txn, const ShardId& name) const = 0;
+ virtual ShardId shardFor(OperationContext* opCtx, const ShardId& name) const = 0;
private:
// Whether or not a range exists in the min/max region
diff --git a/src/mongo/s/chunk_diff_test.cpp b/src/mongo/s/chunk_diff_test.cpp
index fee3d67743e..888bf8f1809 100644
--- a/src/mongo/s/chunk_diff_test.cpp
+++ b/src/mongo/s/chunk_diff_test.cpp
@@ -64,11 +64,11 @@ public:
return true;
}
- virtual pair<BSONObj, BSONObj> rangeFor(OperationContext* txn, const ChunkType& chunk) const {
+ virtual pair<BSONObj, BSONObj> rangeFor(OperationContext* opCtx, const ChunkType& chunk) const {
return make_pair(chunk.getMin(), chunk.getMax());
}
- virtual ShardId shardFor(OperationContext* txn, const ShardId& name) const {
+ virtual ShardId shardFor(OperationContext* opCtx, const ShardId& name) const {
return name;
}
};
@@ -84,7 +84,7 @@ public:
return false;
}
- virtual pair<BSONObj, BSONObj> rangeFor(OperationContext* txn, const ChunkType& chunk) const {
+ virtual pair<BSONObj, BSONObj> rangeFor(OperationContext* opCtx, const ChunkType& chunk) const {
return make_pair(chunk.getMax(), chunk.getMin());
}
};
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 75f2cc7c11c..20cfd7e098f 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -87,13 +87,14 @@ public:
return false;
}
- pair<BSONObj, shared_ptr<Chunk>> rangeFor(OperationContext* txn,
+ pair<BSONObj, shared_ptr<Chunk>> rangeFor(OperationContext* opCtx,
const ChunkType& chunk) const final {
return std::make_pair(chunk.getMax(), std::make_shared<Chunk>(chunk));
}
- ShardId shardFor(OperationContext* txn, const ShardId& shardId) const final {
- const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ ShardId shardFor(OperationContext* opCtx, const ShardId& shardId) const final {
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
return shard->getId();
}
@@ -170,7 +171,7 @@ ChunkManager::ChunkManager(NamespaceString nss,
ChunkManager::~ChunkManager() = default;
-void ChunkManager::loadExistingRanges(OperationContext* txn, const ChunkManager* oldManager) {
+void ChunkManager::loadExistingRanges(OperationContext* opCtx, const ChunkManager* oldManager) {
invariant(!_version.isSet());
int tries = 3;
@@ -187,7 +188,7 @@ void ChunkManager::loadExistingRanges(OperationContext* txn, const ChunkManager*
<< " sequenceNumber: " << _sequenceNumber
<< " based on: " << (oldManager ? oldManager->getVersion().toString() : "(empty)");
- if (_load(txn, chunkMap, shardIds, &shardVersions, oldManager)) {
+ if (_load(opCtx, chunkMap, shardIds, &shardVersions, oldManager)) {
// TODO: Merge into diff code above, so we validate in one place
if (isChunkMapValid(chunkMap)) {
_chunkMap = std::move(chunkMap);
@@ -213,7 +214,7 @@ void ChunkManager::loadExistingRanges(OperationContext* txn, const ChunkManager*
<< " after 3 attempts. Please try again.");
}
-bool ChunkManager::_load(OperationContext* txn,
+bool ChunkManager::_load(OperationContext* opCtx,
ChunkMap& chunkMap,
set<ShardId>& shardIds,
ShardVersionMap* shardVersions,
@@ -252,8 +253,8 @@ bool ChunkManager::_load(OperationContext* txn,
// Diff tracker should *always* find at least one chunk if collection exists
repl::OpTime opTime;
std::vector<ChunkType> chunks;
- uassertStatusOK(Grid::get(txn)->catalogClient(txn)->getChunks(
- txn,
+ uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->getChunks(
+ opCtx,
diffQuery.query,
diffQuery.sort,
boost::none,
@@ -264,14 +265,14 @@ bool ChunkManager::_load(OperationContext* txn,
invariant(opTime >= _configOpTime);
_configOpTime = opTime;
- int diffsApplied = differ.calculateConfigDiff(txn, chunks);
+ int diffsApplied = differ.calculateConfigDiff(opCtx, chunks);
if (diffsApplied > 0) {
LOG(2) << "loaded " << diffsApplied << " chunks into new chunk manager for " << _nss
<< " with version " << _version;
// Add all existing shards we find to the shards set
for (ShardVersionMap::iterator it = shardVersions->begin(); it != shardVersions->end();) {
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, it->first);
+ auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, it->first);
if (shardStatus.isOK()) {
shardIds.insert(it->first);
++it;
@@ -347,7 +348,7 @@ std::shared_ptr<Chunk> ChunkManager::findIntersectingChunkWithSimpleCollation(
return findIntersectingChunk(shardKey, CollationSpec::kSimpleSpec);
}
-void ChunkManager::getShardIdsForQuery(OperationContext* txn,
+void ChunkManager::getShardIdsForQuery(OperationContext* opCtx,
const BSONObj& query,
const BSONObj& collation,
set<ShardId>* shardIds) const {
@@ -360,8 +361,8 @@ void ChunkManager::getShardIdsForQuery(OperationContext* txn,
qr->setCollation(_defaultCollator->getSpec().toBSON());
}
- std::unique_ptr<CanonicalQuery> cq =
- uassertStatusOK(CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop()));
+ std::unique_ptr<CanonicalQuery> cq = uassertStatusOK(
+ CanonicalQuery::canonicalize(opCtx, std::move(qr), ExtensionsCallbackNoop()));
// Query validation
if (QueryPlannerCommon::hasNode(cq->root(), MatchExpression::GEO_NEAR)) {
diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h
index 08d1d9fe229..365d4d5df62 100644
--- a/src/mongo/s/chunk_manager.h
+++ b/src/mongo/s/chunk_manager.h
@@ -103,7 +103,7 @@ public:
}
// Loads existing ranges based on info in chunk manager
- void loadExistingRanges(OperationContext* txn, const ChunkManager* oldManager);
+ void loadExistingRanges(OperationContext* opCtx, const ChunkManager* oldManager);
//
// Methods to use once loaded / created
@@ -133,7 +133,7 @@ public:
* Finds the shard IDs for a given filter and collation. If collation is empty, we use the
* collection default collation for targeting.
*/
- void getShardIdsForQuery(OperationContext* txn,
+ void getShardIdsForQuery(OperationContext* opCtx,
const BSONObj& query,
const BSONObj& collation,
std::set<ShardId>* shardIds) const;
@@ -218,7 +218,7 @@ private:
* _chunkRangeMap are consistent with each other. If false is returned, it is not safe to use
* the chunk manager anymore.
*/
- bool _load(OperationContext* txn,
+ bool _load(OperationContext* opCtx,
ChunkMap& chunks,
std::set<ShardId>& shardIds,
ShardVersionMap* shardVersions,
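
ChunkManager::loadExistingRanges above wraps _load in a small bounded retry: build a candidate chunk map, validate it, commit it on success, and start over (up to three attempts) when the load raced with concurrent metadata changes. A minimal standalone sketch of that load-validate-commit loop, with hypothetical loadOnce and isValid hooks rather than the real differ:

    #include <functional>
    #include <map>
    #include <stdexcept>
    #include <string>

    using ChunkMap = std::map<std::string, std::string>;  // Simplified stand-in.

    // Attempts to load and validate a chunk map, retrying a fixed number of times.
    // Throws if no consistent snapshot could be built within the budget.
    ChunkMap loadWithRetries(const std::function<ChunkMap()>& loadOnce,
                             const std::function<bool(const ChunkMap&)>& isValid,
                             int tries = 3) {
        while (tries-- > 0) {
            ChunkMap candidate = loadOnce();
            if (isValid(candidate)) {
                return candidate;  // Commit the consistent snapshot.
            }
            // The load raced with concurrent metadata changes; retry from scratch.
        }
        throw std::runtime_error("failed to load a consistent chunk map; please try again");
    }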
diff --git a/src/mongo/s/client/parallel.cpp b/src/mongo/s/client/parallel.cpp
index 1b16dee2032..89d2836dc92 100644
--- a/src/mongo/s/client/parallel.cpp
+++ b/src/mongo/s/client/parallel.cpp
@@ -211,13 +211,13 @@ ParallelSortClusteredCursor::~ParallelSortClusteredCursor() {
_done = true;
}
-void ParallelSortClusteredCursor::init(OperationContext* txn) {
+void ParallelSortClusteredCursor::init(OperationContext* opCtx) {
if (_didInit)
return;
_didInit = true;
if (!_qSpec.isEmpty()) {
- fullInit(txn);
+ fullInit(opCtx);
} else {
// You can only get here by using the legacy constructor
// TODO: Eliminate this
@@ -316,17 +316,17 @@ void ParallelSortClusteredCursor::_finishCons() {
17306, "have to have all text meta sort keys in projection", textMetaSortKeyFields.empty());
}
-void ParallelSortClusteredCursor::fullInit(OperationContext* txn) {
- startInit(txn);
- finishInit(txn);
+void ParallelSortClusteredCursor::fullInit(OperationContext* opCtx) {
+ startInit(opCtx);
+ finishInit(opCtx);
}
-void ParallelSortClusteredCursor::_markStaleNS(OperationContext* txn,
+void ParallelSortClusteredCursor::_markStaleNS(OperationContext* opCtx,
const NamespaceString& staleNS,
const StaleConfigException& e,
bool& forceReload) {
if (e.requiresFullReload()) {
- Grid::get(txn)->catalogCache()->invalidate(staleNS.db());
+ Grid::get(opCtx)->catalogCache()->invalidate(staleNS.db());
}
if (_staleNSMap.find(staleNS.ns()) == _staleNSMap.end())
@@ -344,10 +344,10 @@ void ParallelSortClusteredCursor::_markStaleNS(OperationContext* txn,
forceReload = tries > 2;
}
-void ParallelSortClusteredCursor::_handleStaleNS(OperationContext* txn,
+void ParallelSortClusteredCursor::_handleStaleNS(OperationContext* opCtx,
const NamespaceString& staleNS,
bool forceReload) {
- auto scopedCMStatus = ScopedChunkManager::get(txn, staleNS);
+ auto scopedCMStatus = ScopedChunkManager::get(opCtx, staleNS);
if (!scopedCMStatus.isOK()) {
log() << "cannot reload database info for stale namespace " << staleNS.ns();
return;
@@ -356,11 +356,11 @@ void ParallelSortClusteredCursor::_handleStaleNS(OperationContext* txn,
const auto& scopedCM = scopedCMStatus.getValue();
// Reload chunk manager, potentially forcing the namespace
- scopedCM.db()->getChunkManagerIfExists(txn, staleNS.ns(), true, forceReload);
+ scopedCM.db()->getChunkManagerIfExists(opCtx, staleNS.ns(), true, forceReload);
}
void ParallelSortClusteredCursor::setupVersionAndHandleSlaveOk(
- OperationContext* txn,
+ OperationContext* opCtx,
std::shared_ptr<ParallelConnectionState> state,
const ShardId& shardId,
std::shared_ptr<Shard> primary,
@@ -377,7 +377,8 @@ void ParallelSortClusteredCursor::setupVersionAndHandleSlaveOk(
// Setup conn
if (!state->conn) {
- const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
state->conn.reset(new ShardConnection(shard->getConnString(), ns.ns(), manager));
}
@@ -440,7 +441,7 @@ void ParallelSortClusteredCursor::setupVersionAndHandleSlaveOk(
}
}
-void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
+void ParallelSortClusteredCursor::startInit(OperationContext* opCtx) {
const bool returnPartial = (_qSpec.options() & QueryOption_PartialResults);
const NamespaceString nss(!_cInfo.isEmpty() ? _cInfo.versionedNS : _qSpec.ns());
@@ -458,7 +459,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
shared_ptr<Shard> primary;
{
- auto scopedCMStatus = ScopedChunkManager::get(txn, nss);
+ auto scopedCMStatus = ScopedChunkManager::get(opCtx, nss);
if (scopedCMStatus != ErrorCodes::NamespaceNotFound) {
uassertStatusOK(scopedCMStatus.getStatus());
const auto& scopedCM = scopedCMStatus.getValue();
@@ -476,7 +477,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
<< manager->getVersion().toString() << "]";
}
- manager->getShardIdsForQuery(txn,
+ manager->getShardIdsForQuery(opCtx,
!_cInfo.isEmpty() ? _cInfo.cmdFilter : _qSpec.filter(),
!_cInfo.isEmpty() ? _cInfo.cmdCollation : BSONObj(),
&shardIds);
@@ -551,7 +552,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
mdata.pcState = std::make_shared<ParallelConnectionState>();
auto state = mdata.pcState;
- setupVersionAndHandleSlaveOk(txn, state, shardId, primary, nss, vinfo, manager);
+ setupVersionAndHandleSlaveOk(opCtx, state, shardId, primary, nss, vinfo, manager);
const string& ns = _qSpec.ns();
@@ -643,7 +644,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
// Probably need to retry fully
bool forceReload;
- _markStaleNS(txn, staleNS, e, forceReload);
+ _markStaleNS(opCtx, staleNS, e, forceReload);
LOG(1) << "stale config of ns " << staleNS
<< " during initialization, will retry with forced : " << forceReload
@@ -654,10 +655,10 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
warning() << "versioned ns " << nss.ns() << " doesn't match stale config namespace "
<< staleNS;
- _handleStaleNS(txn, staleNS, forceReload);
+ _handleStaleNS(opCtx, staleNS, forceReload);
// Restart with new chunk manager
- startInit(txn);
+ startInit(opCtx);
return;
} catch (SocketException& e) {
warning() << "socket exception when initializing on " << shardId
@@ -727,7 +728,7 @@ void ParallelSortClusteredCursor::startInit(OperationContext* txn) {
}
}
-void ParallelSortClusteredCursor::finishInit(OperationContext* txn) {
+void ParallelSortClusteredCursor::finishInit(OperationContext* opCtx) {
bool returnPartial = (_qSpec.options() & QueryOption_PartialResults);
bool specialVersion = _cInfo.versionedNS.size() > 0;
string ns = specialVersion ? _cInfo.versionedNS : _qSpec.ns();
@@ -867,7 +868,7 @@ void ParallelSortClusteredCursor::finishInit(OperationContext* txn) {
const StaleConfigException& exception = i->second;
bool forceReload;
- _markStaleNS(txn, staleNS, exception, forceReload);
+ _markStaleNS(opCtx, staleNS, exception, forceReload);
LOG(1) << "stale config of ns " << staleNS
<< " on finishing query, will retry with forced : " << forceReload
@@ -878,13 +879,13 @@ void ParallelSortClusteredCursor::finishInit(OperationContext* txn) {
warning() << "versioned ns " << ns << " doesn't match stale config namespace "
<< staleNS;
- _handleStaleNS(txn, staleNS, forceReload);
+ _handleStaleNS(opCtx, staleNS, forceReload);
}
}
// Re-establish connections we need to
- startInit(txn);
- finishInit(txn);
+ startInit(opCtx);
+ finishInit(opCtx);
return;
}
@@ -924,7 +925,8 @@ void ParallelSortClusteredCursor::finishInit(OperationContext* txn) {
_cursors[index].reset(mdata.pcState->cursor.get(), &mdata);
- const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
_servers.insert(shard->getConnString().toString());
index++;
diff --git a/src/mongo/s/client/parallel.h b/src/mongo/s/client/parallel.h
index aeb709f0ffd..d375858bae0 100644
--- a/src/mongo/s/client/parallel.h
+++ b/src/mongo/s/client/parallel.h
@@ -91,7 +91,7 @@ public:
~ParallelSortClusteredCursor();
- void init(OperationContext* txn);
+ void init(OperationContext* opCtx);
bool more();
@@ -107,9 +107,9 @@ public:
private:
using ShardCursorsMap = std::map<ShardId, ParallelConnectionMetadata>;
- void fullInit(OperationContext* txn);
- void startInit(OperationContext* txn);
- void finishInit(OperationContext* txn);
+ void fullInit(OperationContext* opCtx);
+ void startInit(OperationContext* opCtx);
+ void finishInit(OperationContext* opCtx);
bool isCommand() {
return NamespaceString(_qSpec.ns()).isCommand();
@@ -117,11 +117,11 @@ private:
void _finishCons();
- void _markStaleNS(OperationContext* txn,
+ void _markStaleNS(OperationContext* opCtx,
const NamespaceString& staleNS,
const StaleConfigException& e,
bool& forceReload);
- void _handleStaleNS(OperationContext* txn, const NamespaceString& staleNS, bool forceReload);
+ void _handleStaleNS(OperationContext* opCtx, const NamespaceString& staleNS, bool forceReload);
bool _didInit;
bool _done;
@@ -150,7 +150,7 @@ private:
* set connection and the primary cannot be reached, the version
* will not be set if the slaveOk flag is set.
*/
- void setupVersionAndHandleSlaveOk(OperationContext* txn,
+ void setupVersionAndHandleSlaveOk(OperationContext* opCtx,
std::shared_ptr<ParallelConnectionState> state /* in & out */,
const ShardId& shardId,
std::shared_ptr<Shard> primary /* in */,
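
_markStaleNS above keeps a per-namespace count of StaleConfigException retries and only asks for a forced metadata reload once the same namespace has gone stale more than twice. A minimal standalone sketch of that escalation policy, using a plain map as a stand-in for _staleNSMap:

    #include <map>
    #include <string>

    class StaleNsTracker {
    public:
        // Records one more stale-config retry for 'ns' and reports whether the
        // caller should force a full metadata reload (after more than two tries).
        bool markStaleAndShouldForceReload(const std::string& ns) {
            int& tries = _staleNsMap[ns];  // Value-initialized to 0 on first use.
            ++tries;
            return tries > 2;
        }

    private:
        std::map<std::string, int> _staleNsMap;
    };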
diff --git a/src/mongo/s/client/shard.cpp b/src/mongo/s/client/shard.cpp
index be61c500604..0702cdc91a5 100644
--- a/src/mongo/s/client/shard.cpp
+++ b/src/mongo/s/client/shard.cpp
@@ -110,27 +110,27 @@ bool Shard::isConfig() const {
return _id == "config";
}
-StatusWith<Shard::CommandResponse> Shard::runCommand(OperationContext* txn,
+StatusWith<Shard::CommandResponse> Shard::runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
RetryPolicy retryPolicy) {
- return runCommand(txn, readPref, dbName, cmdObj, Milliseconds::max(), retryPolicy);
+ return runCommand(opCtx, readPref, dbName, cmdObj, Milliseconds::max(), retryPolicy);
}
-StatusWith<Shard::CommandResponse> Shard::runCommand(OperationContext* txn,
+StatusWith<Shard::CommandResponse> Shard::runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
Milliseconds maxTimeMSOverride,
RetryPolicy retryPolicy) {
while (true) {
- auto interruptStatus = txn->checkForInterruptNoAssert();
+ auto interruptStatus = opCtx->checkForInterruptNoAssert();
if (!interruptStatus.isOK()) {
return interruptStatus;
}
- auto hostWithResponse = _runCommand(txn, readPref, dbName, maxTimeMSOverride, cmdObj);
+ auto hostWithResponse = _runCommand(opCtx, readPref, dbName, maxTimeMSOverride, cmdObj);
auto swCmdResponse = std::move(hostWithResponse.commandResponse);
auto commandStatus = _getEffectiveCommandStatus(swCmdResponse);
@@ -147,29 +147,29 @@ StatusWith<Shard::CommandResponse> Shard::runCommand(OperationContext* txn,
}
StatusWith<Shard::CommandResponse> Shard::runCommandWithFixedRetryAttempts(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
RetryPolicy retryPolicy) {
return runCommandWithFixedRetryAttempts(
- txn, readPref, dbName, cmdObj, Milliseconds::max(), retryPolicy);
+ opCtx, readPref, dbName, cmdObj, Milliseconds::max(), retryPolicy);
}
StatusWith<Shard::CommandResponse> Shard::runCommandWithFixedRetryAttempts(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
Milliseconds maxTimeMSOverride,
RetryPolicy retryPolicy) {
for (int retry = 1; retry <= kOnErrorNumRetries; ++retry) {
- auto interruptStatus = txn->checkForInterruptNoAssert();
+ auto interruptStatus = opCtx->checkForInterruptNoAssert();
if (!interruptStatus.isOK()) {
return interruptStatus;
}
- auto hostWithResponse = _runCommand(txn, readPref, dbName, maxTimeMSOverride, cmdObj);
+ auto hostWithResponse = _runCommand(opCtx, readPref, dbName, maxTimeMSOverride, cmdObj);
auto swCmdResponse = std::move(hostWithResponse.commandResponse);
auto commandStatus = _getEffectiveCommandStatus(swCmdResponse);
@@ -186,7 +186,7 @@ StatusWith<Shard::CommandResponse> Shard::runCommandWithFixedRetryAttempts(
}
BatchedCommandResponse Shard::runBatchWriteCommandOnConfig(
- OperationContext* txn, const BatchedCommandRequest& batchRequest, RetryPolicy retryPolicy) {
+ OperationContext* opCtx, const BatchedCommandRequest& batchRequest, RetryPolicy retryPolicy) {
invariant(isConfig());
const std::string dbname = batchRequest.getNS().db().toString();
@@ -195,7 +195,7 @@ BatchedCommandResponse Shard::runBatchWriteCommandOnConfig(
const BSONObj cmdObj = batchRequest.toBSON();
for (int retry = 1; retry <= kOnErrorNumRetries; ++retry) {
- auto response = _runCommand(txn,
+ auto response = _runCommand(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
dbname,
kDefaultConfigCommandTimeout,
@@ -221,7 +221,7 @@ BatchedCommandResponse Shard::runBatchWriteCommandOnConfig(
}
StatusWith<Shard::QueryResponse> Shard::exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
const NamespaceString& nss,
@@ -233,7 +233,7 @@ StatusWith<Shard::QueryResponse> Shard::exhaustiveFindOnConfig(
for (int retry = 1; retry <= kOnErrorNumRetries; retry++) {
auto result =
- _exhaustiveFindOnConfig(txn, readPref, readConcernLevel, nss, query, sort, limit);
+ _exhaustiveFindOnConfig(opCtx, readPref, readConcernLevel, nss, query, sort, limit);
if (retry < kOnErrorNumRetries &&
isRetriableError(result.getStatus().code(), RetryPolicy::kIdempotent)) {
diff --git a/src/mongo/s/client/shard.h b/src/mongo/s/client/shard.h
index 26413b61c6d..a322af1bcf1 100644
--- a/src/mongo/s/client/shard.h
+++ b/src/mongo/s/client/shard.h
@@ -142,7 +142,7 @@ public:
* given "retryPolicy". Retries indefinitely until/unless a non-retriable error is encountered,
* the maxTimeMs on the OperationContext expires, or the operation is interrupted.
*/
- StatusWith<CommandResponse> runCommand(OperationContext* txn,
+ StatusWith<CommandResponse> runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
@@ -153,7 +153,7 @@ public:
* Runs for the lesser of the remaining time on the operation context or the specified maxTimeMS
* override.
*/
- StatusWith<CommandResponse> runCommand(OperationContext* txn,
+ StatusWith<CommandResponse> runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
@@ -166,7 +166,7 @@ public:
* Wherever possible this method should be avoided in favor of runCommand.
*/
StatusWith<CommandResponse> runCommandWithFixedRetryAttempts(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
@@ -178,7 +178,7 @@ public:
* Wherever possible this method should be avoided in favor of runCommand.
*/
StatusWith<CommandResponse> runCommandWithFixedRetryAttempts(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbName,
const BSONObj& cmdObj,
@@ -189,7 +189,7 @@ public:
 * Expects a single-entry batch write command and runs it on the config server's primary using
* the specified retry policy.
*/
- BatchedCommandResponse runBatchWriteCommandOnConfig(OperationContext* txn,
+ BatchedCommandResponse runBatchWriteCommandOnConfig(OperationContext* opCtx,
const BatchedCommandRequest& batchRequest,
RetryPolicy retryPolicy);
@@ -201,7 +201,7 @@ public:
* ShardRemote instances expect "readConcernLevel" to always be kMajorityReadConcern, whereas
* ShardLocal instances expect either kLocalReadConcern or kMajorityReadConcern.
*/
- StatusWith<QueryResponse> exhaustiveFindOnConfig(OperationContext* txn,
+ StatusWith<QueryResponse> exhaustiveFindOnConfig(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
const NamespaceString& nss,
@@ -214,7 +214,7 @@ public:
* so long as the options are the same.
* NOTE: Currently only supported for LocalShard.
*/
- virtual Status createIndexOnConfig(OperationContext* txn,
+ virtual Status createIndexOnConfig(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& keys,
bool unique) = 0;
@@ -252,14 +252,14 @@ private:
*
* NOTE: LocalShard implementation will not return a valid host and so should be ignored.
*/
- virtual HostWithResponse _runCommand(OperationContext* txn,
+ virtual HostWithResponse _runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbname,
Milliseconds maxTimeMSOverride,
const BSONObj& cmdObj) = 0;
virtual StatusWith<QueryResponse> _exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
const NamespaceString& nss,
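
Both runCommand overloads above share one retry skeleton: check the operation for interruption, run the command once, and retry only while the failure is classified as retriable and the attempt budget is not exhausted. A minimal standalone sketch of the fixed-attempt variant, with hedged Status, runOnce and isRetriable stand-ins (and an assumed budget of 3) rather than the real Shard API:

    #include <functional>
    #include <string>

    struct Status {
        bool ok;
        std::string reason;
    };

    constexpr int kOnErrorNumRetries = 3;  // Assumed budget for this sketch.

    // Runs 'runOnce' up to kOnErrorNumRetries times, retrying only retriable failures.
    Status runWithFixedRetryAttempts(const std::function<Status()>& checkForInterrupt,
                                     const std::function<Status()>& runOnce,
                                     const std::function<bool(const Status&)>& isRetriable) {
        Status last{false, "never ran"};
        for (int retry = 1; retry <= kOnErrorNumRetries; ++retry) {
            Status interrupted = checkForInterrupt();
            if (!interrupted.ok) {
                return interrupted;  // Give up immediately if the operation was killed.
            }
            last = runOnce();
            if (last.ok || !isRetriable(last) || retry == kOnErrorNumRetries) {
                return last;
            }
            // Retriable failure with budget remaining: loop and try again.
        }
        return last;
    }

The unbounded runCommand variant differs only in the loop condition; it keeps retrying retriable failures until the operation context is interrupted or its maxTimeMS expires.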
diff --git a/src/mongo/s/client/shard_connection.cpp b/src/mongo/s/client/shard_connection.cpp
index ec80fb5c5e5..0a5d178ba3e 100644
--- a/src/mongo/s/client/shard_connection.cpp
+++ b/src/mongo/s/client/shard_connection.cpp
@@ -111,7 +111,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
mongo::BSONObj& cmdObj,
int options,
@@ -273,7 +273,7 @@ public:
s->avail = conn;
}
- void checkVersions(OperationContext* txn, const string& ns) {
+ void checkVersions(OperationContext* opCtx, const string& ns) {
vector<ShardId> all;
grid.shardRegistry()->getAllShardIds(&all);
@@ -283,7 +283,7 @@ public:
// Now only check top-level shard connections
for (const ShardId& shardId : all) {
try {
- auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
+ auto shardStatus = grid.shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
invariant(shardStatus == ErrorCodes::ShardNotFound);
continue;
@@ -298,7 +298,7 @@ public:
s->created++; // After, so failed creation doesn't get counted
}
- versionManager.checkShardVersionCB(txn, s->avail, ns, false, 1);
+ versionManager.checkShardVersionCB(opCtx, s->avail, ns, false, 1);
} catch (const DBException& ex) {
warning() << "problem while initially checking shard versions on"
<< " " << shardId << causedBy(ex);
@@ -450,9 +450,9 @@ void ShardConnection::_finishInit() {
if (versionManager.isVersionableCB(_conn)) {
auto& client = cc();
- auto txn = client.getOperationContext();
- invariant(txn);
- _setVersion = versionManager.checkShardVersionCB(txn, this, false, 1);
+ auto opCtx = client.getOperationContext();
+ invariant(opCtx);
+ _setVersion = versionManager.checkShardVersionCB(opCtx, this, false, 1);
} else {
// Make sure we didn't specify a manager for a non-versionable connection (i.e. config)
verify(!_manager);
@@ -486,8 +486,8 @@ void ShardConnection::kill() {
}
}
-void ShardConnection::checkMyConnectionVersions(OperationContext* txn, const string& ns) {
- ClientConnections::threadInstance()->checkVersions(txn, ns);
+void ShardConnection::checkMyConnectionVersions(OperationContext* opCtx, const string& ns) {
+ ClientConnections::threadInstance()->checkVersions(opCtx, ns);
}
void ShardConnection::releaseMyConnections() {
diff --git a/src/mongo/s/client/shard_connection.h b/src/mongo/s/client/shard_connection.h
index db535aee5a7..62afb593f74 100644
--- a/src/mongo/s/client/shard_connection.h
+++ b/src/mongo/s/client/shard_connection.h
@@ -113,7 +113,7 @@ public:
}
/** checks all of my thread local connections for the version of this ns */
- static void checkMyConnectionVersions(OperationContext* txn, const std::string& ns);
+ static void checkMyConnectionVersions(OperationContext* opCtx, const std::string& ns);
/**
* Returns all the current sharded connections to the pool.
diff --git a/src/mongo/s/client/shard_local.cpp b/src/mongo/s/client/shard_local.cpp
index 54a0af07ba3..5270842a8ba 100644
--- a/src/mongo/s/client/shard_local.cpp
+++ b/src/mongo/s/client/shard_local.cpp
@@ -92,10 +92,10 @@ bool ShardLocal::isRetriableError(ErrorCodes::Error code, RetryPolicy options) {
}
}
-void ShardLocal::_updateLastOpTimeFromClient(OperationContext* txn,
+void ShardLocal::_updateLastOpTimeFromClient(OperationContext* opCtx,
const repl::OpTime& previousOpTimeOnClient) {
repl::OpTime lastOpTimeFromClient =
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp();
+ repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
invariant(lastOpTimeFromClient >= previousOpTimeOnClient);
if (lastOpTimeFromClient.isNull() || lastOpTimeFromClient == previousOpTimeOnClient) {
return;
@@ -115,19 +115,19 @@ repl::OpTime ShardLocal::_getLastOpTime() {
return _lastOpTime;
}
-Shard::HostWithResponse ShardLocal::_runCommand(OperationContext* txn,
+Shard::HostWithResponse ShardLocal::_runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& unused,
const std::string& dbName,
Milliseconds maxTimeMSOverrideUnused,
const BSONObj& cmdObj) {
repl::OpTime currentOpTimeFromClient =
- repl::ReplClientInfo::forClient(txn->getClient()).getLastOp();
- ON_BLOCK_EXIT([this, &txn, &currentOpTimeFromClient] {
- _updateLastOpTimeFromClient(txn, currentOpTimeFromClient);
+ repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
+ ON_BLOCK_EXIT([this, &opCtx, &currentOpTimeFromClient] {
+ _updateLastOpTimeFromClient(opCtx, currentOpTimeFromClient);
});
try {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
rpc::UniqueReply commandResponse = client.runCommandWithMetadata(
dbName, cmdObj.firstElementFieldName(), rpc::makeEmptyMetadata(), cmdObj);
BSONObj responseReply = commandResponse->getCommandReply().getOwned();
@@ -147,29 +147,29 @@ Shard::HostWithResponse ShardLocal::_runCommand(OperationContext* txn,
}
StatusWith<Shard::QueryResponse> ShardLocal::_exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
const NamespaceString& nss,
const BSONObj& query,
const BSONObj& sort,
boost::optional<long long> limit) {
- auto replCoord = repl::ReplicationCoordinator::get(txn);
+ auto replCoord = repl::ReplicationCoordinator::get(opCtx);
if (readConcernLevel == repl::ReadConcernLevel::kMajorityReadConcern) {
// Set up operation context with majority read snapshot so correct optime can be retrieved.
- Status status = txn->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
+ Status status = opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
// Wait for any writes performed by this ShardLocal instance to be committed and visible.
Status readConcernStatus = replCoord->waitUntilOpTimeForRead(
- txn, repl::ReadConcernArgs{_getLastOpTime(), readConcernLevel});
+ opCtx, repl::ReadConcernArgs{_getLastOpTime(), readConcernLevel});
if (!readConcernStatus.isOK()) {
return readConcernStatus;
}
// Inform the storage engine to read from the committed snapshot for the rest of this
// operation.
- status = txn->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
+ status = opCtx->recoveryUnit()->setReadFromMajorityCommittedSnapshot();
if (!status.isOK()) {
return status;
}
@@ -177,7 +177,7 @@ StatusWith<Shard::QueryResponse> ShardLocal::_exhaustiveFindOnConfig(
invariant(readConcernLevel == repl::ReadConcernLevel::kLocalReadConcern);
}
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
Query fullQuery(query);
if (!sort.isEmpty()) {
fullQuery.sort(sort);
@@ -207,14 +207,14 @@ StatusWith<Shard::QueryResponse> ShardLocal::_exhaustiveFindOnConfig(
}
}
-Status ShardLocal::createIndexOnConfig(OperationContext* txn,
+Status ShardLocal::createIndexOnConfig(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& keys,
bool unique) {
invariant(ns.db() == "config" || ns.db() == "admin");
try {
- DBDirectClient client(txn);
+ DBDirectClient client(opCtx);
IndexSpec index;
index.addKeys(keys);
index.unique(unique);
diff --git a/src/mongo/s/client/shard_local.h b/src/mongo/s/client/shard_local.h
index 75e97ed0d29..3afdee97fb8 100644
--- a/src/mongo/s/client/shard_local.h
+++ b/src/mongo/s/client/shard_local.h
@@ -58,20 +58,20 @@ public:
bool isRetriableError(ErrorCodes::Error code, RetryPolicy options) final;
- Status createIndexOnConfig(OperationContext* txn,
+ Status createIndexOnConfig(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& keys,
bool unique) override;
private:
- Shard::HostWithResponse _runCommand(OperationContext* txn,
+ Shard::HostWithResponse _runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& unused,
const std::string& dbName,
Milliseconds maxTimeMSOverrideUnused,
const BSONObj& cmdObj) final;
StatusWith<Shard::QueryResponse> _exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
const NamespaceString& nss,
@@ -90,7 +90,7 @@ private:
* 'previousOpTimeOnClient' then the command we just ran didn't do a write, and we should leave
* _lastOpTime alone.
*/
- void _updateLastOpTimeFromClient(OperationContext* txn,
+ void _updateLastOpTimeFromClient(OperationContext* opCtx,
const repl::OpTime& previousOpTimeOnClient);
repl::OpTime _getLastOpTime();
diff --git a/src/mongo/s/client/shard_local_test.cpp b/src/mongo/s/client/shard_local_test.cpp
index c667be3ccd0..80d1c12ff81 100644
--- a/src/mongo/s/client/shard_local_test.cpp
+++ b/src/mongo/s/client/shard_local_test.cpp
@@ -47,7 +47,7 @@ namespace {
class ShardLocalTest : public ServiceContextMongoDTest {
protected:
- ServiceContext::UniqueOperationContext _txn;
+ ServiceContext::UniqueOperationContext _opCtx;
std::unique_ptr<ShardLocal> _shardLocal;
/**
@@ -81,17 +81,17 @@ private:
void ShardLocalTest::setUp() {
ServiceContextMongoDTest::setUp();
Client::initThreadIfNotAlready();
- _txn = getGlobalServiceContext()->makeOperationContext(&cc());
+ _opCtx = getGlobalServiceContext()->makeOperationContext(&cc());
serverGlobalParams.clusterRole = ClusterRole::ConfigServer;
_shardLocal = stdx::make_unique<ShardLocal>(ShardId("config"));
const repl::ReplSettings replSettings = {};
repl::setGlobalReplicationCoordinator(
- new repl::ReplicationCoordinatorMock(_txn->getServiceContext(), replSettings));
+ new repl::ReplicationCoordinatorMock(_opCtx->getServiceContext(), replSettings));
repl::getGlobalReplicationCoordinator()->setFollowerMode(repl::MemberState::RS_PRIMARY);
}
void ShardLocalTest::tearDown() {
- _txn.reset();
+ _opCtx.reset();
ServiceContextMongoDTest::tearDown();
repl::setGlobalReplicationCoordinator(nullptr);
}
@@ -106,7 +106,7 @@ StatusWith<Shard::CommandResponse> ShardLocalTest::runFindAndModifyRunCommand(Na
WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, Seconds(15)));
return _shardLocal->runCommandWithFixedRetryAttempts(
- _txn.get(),
+ _opCtx.get(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
nss.db().toString(),
findAndModifyRequest.toBSON(),
@@ -115,7 +115,7 @@ StatusWith<Shard::CommandResponse> ShardLocalTest::runFindAndModifyRunCommand(Na
StatusWith<std::vector<BSONObj>> ShardLocalTest::getIndexes(NamespaceString nss) {
auto response = _shardLocal->runCommandWithFixedRetryAttempts(
- _txn.get(),
+ _opCtx.get(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
nss.db().toString(),
BSON("listIndexes" << nss.coll().toString()),
@@ -148,7 +148,7 @@ StatusWith<Shard::QueryResponse> ShardLocalTest::runFindQuery(NamespaceString ns
BSONObj query,
BSONObj sort,
boost::optional<long long> limit) {
- return _shardLocal->exhaustiveFindOnConfig(_txn.get(),
+ return _shardLocal->exhaustiveFindOnConfig(_opCtx.get(),
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
repl::ReadConcernLevel::kMajorityReadConcern,
nss,
@@ -247,7 +247,7 @@ TEST_F(ShardLocalTest, CreateIndex) {
ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, getIndexes(nss).getStatus());
Status status =
- _shardLocal->createIndexOnConfig(_txn.get(), nss, BSON("a" << 1 << "b" << 1), true);
+ _shardLocal->createIndexOnConfig(_opCtx.get(), nss, BSON("a" << 1 << "b" << 1), true);
// Creating the index should implicitly create the collection
ASSERT_OK(status);
@@ -256,13 +256,13 @@ TEST_F(ShardLocalTest, CreateIndex) {
ASSERT_EQ(2U, indexes.size());
// Making an identical index should be a no-op.
- status = _shardLocal->createIndexOnConfig(_txn.get(), nss, BSON("a" << 1 << "b" << 1), true);
+ status = _shardLocal->createIndexOnConfig(_opCtx.get(), nss, BSON("a" << 1 << "b" << 1), true);
ASSERT_OK(status);
indexes = unittest::assertGet(getIndexes(nss));
ASSERT_EQ(2U, indexes.size());
// Trying to make the same index as non-unique should fail.
- status = _shardLocal->createIndexOnConfig(_txn.get(), nss, BSON("a" << 1 << "b" << 1), false);
+ status = _shardLocal->createIndexOnConfig(_opCtx.get(), nss, BSON("a" << 1 << "b" << 1), false);
ASSERT_EQUALS(ErrorCodes::IndexOptionsConflict, status);
indexes = unittest::assertGet(getIndexes(nss));
ASSERT_EQ(2U, indexes.size());
diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp
index 5f30b22c8ea..7cadf2ed8d0 100644
--- a/src/mongo/s/client/shard_registry.cpp
+++ b/src/mongo/s/client/shard_registry.cpp
@@ -103,7 +103,7 @@ ConnectionString ShardRegistry::getConfigServerConnectionString() const {
return getConfigShard()->getConnString();
}
-StatusWith<shared_ptr<Shard>> ShardRegistry::getShard(OperationContext* txn,
+StatusWith<shared_ptr<Shard>> ShardRegistry::getShard(OperationContext* opCtx,
const ShardId& shardId) {
// If we know about the shard, return it.
auto shard = _data.findByShardId(shardId);
@@ -112,7 +112,7 @@ StatusWith<shared_ptr<Shard>> ShardRegistry::getShard(OperationContext* txn,
}
// If we can't find the shard, attempt to reload the ShardRegistry.
- bool didReload = reload(txn);
+ bool didReload = reload(opCtx);
shard = _data.findByShardId(shardId);
// If we found the shard, return it.
@@ -128,7 +128,7 @@ StatusWith<shared_ptr<Shard>> ShardRegistry::getShard(OperationContext* txn,
// If we did not perform the reload ourselves (because there was a concurrent reload), force a
// reload again to ensure that we have seen data at least as up to date as our first reload.
- reload(txn);
+ reload(opCtx);
shard = _data.findByShardId(shardId);
if (shard) {
@@ -226,10 +226,10 @@ void ShardRegistry::_internalReload(const CallbackArgs& cbArgs) {
}
Client::initThreadIfNotAlready("shard registry reload");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
try {
- reload(txn.get());
+ reload(opCtx.get());
} catch (const DBException& e) {
log() << "Periodic reload of shard registry failed " << causedBy(e) << "; will retry after "
<< kRefreshPeriod;
@@ -257,7 +257,7 @@ bool ShardRegistry::isUp() const {
return _isUp;
}
-bool ShardRegistry::reload(OperationContext* txn) {
+bool ShardRegistry::reload(OperationContext* opCtx) {
stdx::unique_lock<stdx::mutex> reloadLock(_reloadMutex);
if (_reloadState == ReloadState::Reloading) {
@@ -290,7 +290,7 @@ bool ShardRegistry::reload(OperationContext* txn) {
});
- ShardRegistryData currData(txn, _shardFactory.get());
+ ShardRegistryData currData(opCtx, _shardFactory.get());
currData.addConfigShard(_data.getConfigShard());
_data.swap(currData);
@@ -325,10 +325,10 @@ void ShardRegistry::replicaSetChangeConfigServerUpdateHook(const std::string& se
const std::string& newConnectionString) {
    // This is run in its own thread. Exceptions escaping would result in a call to terminate.
Client::initThread("replSetChange");
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
try {
- std::shared_ptr<Shard> s = Grid::get(txn.get())->shardRegistry()->lookupRSName(setName);
+ std::shared_ptr<Shard> s = Grid::get(opCtx.get())->shardRegistry()->lookupRSName(setName);
if (!s) {
LOG(1) << "shard not found for set: " << newConnectionString
<< " when attempting to inform config servers of updated set membership";
@@ -340,13 +340,15 @@ void ShardRegistry::replicaSetChangeConfigServerUpdateHook(const std::string& se
return;
}
- auto status = Grid::get(txn.get())->catalogClient(txn.get())->updateConfigDocument(
- txn.get(),
- ShardType::ConfigNS,
- BSON(ShardType::name(s->getId().toString())),
- BSON("$set" << BSON(ShardType::host(newConnectionString))),
- false,
- ShardingCatalogClient::kMajorityWriteConcern);
+ auto status =
+ Grid::get(opCtx.get())
+ ->catalogClient(opCtx.get())
+ ->updateConfigDocument(opCtx.get(),
+ ShardType::ConfigNS,
+ BSON(ShardType::name(s->getId().toString())),
+ BSON("$set" << BSON(ShardType::host(newConnectionString))),
+ false,
+ ShardingCatalogClient::kMajorityWriteConcern);
if (!status.isOK()) {
error() << "RSChangeWatcher: could not update config db for set: " << setName
<< " to: " << newConnectionString << causedBy(status.getStatus());
@@ -360,13 +362,13 @@ void ShardRegistry::replicaSetChangeConfigServerUpdateHook(const std::string& se
////////////// ShardRegistryData //////////////////
-ShardRegistryData::ShardRegistryData(OperationContext* txn, ShardFactory* shardFactory) {
- _init(txn, shardFactory);
+ShardRegistryData::ShardRegistryData(OperationContext* opCtx, ShardFactory* shardFactory) {
+ _init(opCtx, shardFactory);
}
-void ShardRegistryData::_init(OperationContext* txn, ShardFactory* shardFactory) {
- auto shardsStatus =
- grid.catalogClient(txn)->getAllShards(txn, repl::ReadConcernLevel::kMajorityReadConcern);
+void ShardRegistryData::_init(OperationContext* opCtx, ShardFactory* shardFactory) {
+ auto shardsStatus = grid.catalogClient(opCtx)->getAllShards(
+ opCtx, repl::ReadConcernLevel::kMajorityReadConcern);
if (!shardsStatus.isOK()) {
uasserted(shardsStatus.getStatus().code(),
diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h
index 374329aad7d..92e31156e88 100644
--- a/src/mongo/s/client/shard_registry.h
+++ b/src/mongo/s/client/shard_registry.h
@@ -53,7 +53,7 @@ class ShardType;
class ShardRegistryData {
public:
- ShardRegistryData(OperationContext* txn, ShardFactory* shardFactory);
+ ShardRegistryData(OperationContext* opCtx, ShardFactory* shardFactory);
ShardRegistryData() = default;
~ShardRegistryData() = default;
@@ -101,7 +101,7 @@ private:
/**
* Reads shards docs from the catalog client and fills in maps.
*/
- void _init(OperationContext* txn, ShardFactory* factory);
+ void _init(OperationContext* opCtx, ShardFactory* factory);
/**
* Creates a shard based on the specified information and puts it into the lookup maps.
@@ -171,7 +171,7 @@ public:
* reloading is required, the caller should call this method one more time if the first call
* returned false.
*/
- bool reload(OperationContext* txn);
+ bool reload(OperationContext* opCtx);
/**
* Takes a connection string describing either a shard or config server replica set, looks
@@ -188,7 +188,7 @@ public:
* parameter can actually be the shard name or the HostAndPort for any
* server in the shard.
*/
- StatusWith<std::shared_ptr<Shard>> getShard(OperationContext* txn, const ShardId& shardId);
+ StatusWith<std::shared_ptr<Shard>> getShard(OperationContext* opCtx, const ShardId& shardId);
/**
* Returns a shared pointer to the shard object with the given shard id. The shardId parameter
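
The comments above spell out the reload() contract: a false return means a concurrent caller performed the refresh, so anyone who needs data at least as fresh as their own request should call reload() once more, exactly as getShard() does internally. A hedged caller-side sketch of that pattern (opCtx and shardId are assumed to come from the surrounding command code):

    auto* shardRegistry = Grid::get(opCtx)->shardRegistry();
    if (!shardRegistry->reload(opCtx)) {
        // Someone else reloaded concurrently; reload again so we observe data at least
        // as recent as our own request.
        shardRegistry->reload(opCtx);
    }
    auto swShard = shardRegistry->getShard(opCtx, shardId);
    if (!swShard.isOK()) {
        // The shard is genuinely unknown (or the reload failed); surface the status.
        return swShard.getStatus();
    }
    std::shared_ptr<Shard> shard = std::move(swShard.getValue());
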
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index dea32639d84..3be06bd6ca7 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -141,22 +141,22 @@ std::string ShardRemote::toString() const {
return getId().toString() + ":" + _originalConnString.toString();
}
-BSONObj ShardRemote::_appendMetadataForCommand(OperationContext* txn,
+BSONObj ShardRemote::_appendMetadataForCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref) {
BSONObjBuilder builder;
if (logger::globalLogDomain()->shouldLog(
logger::LogComponent::kTracking,
logger::LogSeverity::Debug(1))) { // avoid performance overhead if not logging
- if (!TrackingMetadata::get(txn).getIsLogged()) {
- if (!TrackingMetadata::get(txn).getOperId()) {
- TrackingMetadata::get(txn).initWithOperName("NotSet");
+ if (!TrackingMetadata::get(opCtx).getIsLogged()) {
+ if (!TrackingMetadata::get(opCtx).getOperId()) {
+ TrackingMetadata::get(opCtx).initWithOperName("NotSet");
}
MONGO_LOG_COMPONENT(1, logger::LogComponent::kTracking)
- << TrackingMetadata::get(txn).toString();
- TrackingMetadata::get(txn).setIsLogged(true);
+ << TrackingMetadata::get(opCtx).toString();
+ TrackingMetadata::get(opCtx).setIsLogged(true);
}
- TrackingMetadata metadata = TrackingMetadata::get(txn).constructChildMetadata();
+ TrackingMetadata metadata = TrackingMetadata::get(opCtx).constructChildMetadata();
metadata.writeToMetadata(&builder);
}
@@ -175,7 +175,7 @@ BSONObj ShardRemote::_appendMetadataForCommand(OperationContext* txn,
return builder.obj();
}
-Shard::HostWithResponse ShardRemote::_runCommand(OperationContext* txn,
+Shard::HostWithResponse ShardRemote::_runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const string& dbName,
Milliseconds maxTimeMSOverride,
@@ -185,26 +185,26 @@ Shard::HostWithResponse ShardRemote::_runCommand(OperationContext* txn,
if (getId() == "config") {
readPrefWithMinOpTime.minOpTime = grid.configOpTime();
}
- const auto host = _targeter->findHost(txn, readPrefWithMinOpTime);
+ const auto host = _targeter->findHost(opCtx, readPrefWithMinOpTime);
if (!host.isOK()) {
return Shard::HostWithResponse(boost::none, host.getStatus());
}
const Milliseconds requestTimeout =
- std::min(txn->getRemainingMaxTimeMillis(), maxTimeMSOverride);
+ std::min(opCtx->getRemainingMaxTimeMillis(), maxTimeMSOverride);
const RemoteCommandRequest request(
host.getValue(),
dbName,
appendMaxTimeToCmdObj(requestTimeout, cmdObj),
- _appendMetadataForCommand(txn, readPrefWithMinOpTime),
- txn,
+ _appendMetadataForCommand(opCtx, readPrefWithMinOpTime),
+ opCtx,
requestTimeout < Milliseconds::max() ? requestTimeout : RemoteCommandRequest::kNoTimeout);
RemoteCommandResponse swResponse =
Status(ErrorCodes::InternalError, "Internal error running command");
- TaskExecutor* executor = Grid::get(txn)->getExecutorPool()->getFixedExecutor();
+ TaskExecutor* executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor();
auto callStatus = executor->scheduleRemoteCommand(
request,
[&swResponse](const RemoteCommandCallbackArgs& args) { swResponse = args.response; });
@@ -241,7 +241,7 @@ Shard::HostWithResponse ShardRemote::_runCommand(OperationContext* txn,
}
StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
const NamespaceString& nss,
@@ -252,7 +252,7 @@ StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
ReadPreferenceSetting readPrefWithMinOpTime(readPref);
readPrefWithMinOpTime.minOpTime = grid.configOpTime();
- const auto host = _targeter->findHost(txn, readPrefWithMinOpTime);
+ const auto host = _targeter->findHost(opCtx, readPrefWithMinOpTime);
if (!host.isOK()) {
return host.getStatus();
}
@@ -313,7 +313,7 @@ StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
}
const Milliseconds maxTimeMS =
- std::min(txn->getRemainingMaxTimeMillis(), kDefaultConfigCommandTimeout);
+ std::min(opCtx->getRemainingMaxTimeMillis(), kDefaultConfigCommandTimeout);
BSONObjBuilder findCmdBuilder;
@@ -331,12 +331,12 @@ StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
qr.asFindCommand(&findCmdBuilder);
}
- Fetcher fetcher(Grid::get(txn)->getExecutorPool()->getFixedExecutor(),
+ Fetcher fetcher(Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(),
host.getValue(),
nss.db().toString(),
findCmdBuilder.done(),
fetcherCallback,
- _appendMetadataForCommand(txn, readPrefWithMinOpTime),
+ _appendMetadataForCommand(opCtx, readPrefWithMinOpTime),
maxTimeMS);
Status scheduleStatus = fetcher.schedule();
if (!scheduleStatus.isOK()) {
@@ -357,7 +357,7 @@ StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
return response;
}
-Status ShardRemote::createIndexOnConfig(OperationContext* txn,
+Status ShardRemote::createIndexOnConfig(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& keys,
bool unique) {
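
One detail worth calling out in _runCommand above: every remote request is bounded by whatever time the operation has left, so a caller-supplied override can only tighten the deadline, never extend it past the opCtx budget. A small sketch of that clamping rule (variable names local to the sketch):

    // The effective deadline is the smaller of the operation's remaining budget
    // and the per-call override.
    const Milliseconds requestTimeout =
        std::min(opCtx->getRemainingMaxTimeMillis(), maxTimeMSOverride);

    // Milliseconds::max() means "unbounded", which RemoteCommandRequest spells kNoTimeout.
    const Milliseconds wireTimeout = requestTimeout < Milliseconds::max()
        ? requestTimeout
        : RemoteCommandRequest::kNoTimeout;
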
diff --git a/src/mongo/s/client/shard_remote.h b/src/mongo/s/client/shard_remote.h
index 908ebf33eee..c7f6931d77e 100644
--- a/src/mongo/s/client/shard_remote.h
+++ b/src/mongo/s/client/shard_remote.h
@@ -70,7 +70,7 @@ public:
bool isRetriableError(ErrorCodes::Error code, RetryPolicy options) final;
- Status createIndexOnConfig(OperationContext* txn,
+ Status createIndexOnConfig(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& keys,
bool unique) override;
@@ -80,16 +80,17 @@ private:
* Returns the metadata that should be used when running commands against this shard with
* the given read preference.
*/
- BSONObj _appendMetadataForCommand(OperationContext* txn, const ReadPreferenceSetting& readPref);
+ BSONObj _appendMetadataForCommand(OperationContext* opCtx,
+ const ReadPreferenceSetting& readPref);
- Shard::HostWithResponse _runCommand(OperationContext* txn,
+ Shard::HostWithResponse _runCommand(OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const std::string& dbname,
Milliseconds maxTimeMSOverride,
const BSONObj& cmdObj) final;
StatusWith<QueryResponse> _exhaustiveFindOnConfig(
- OperationContext* txn,
+ OperationContext* opCtx,
const ReadPreferenceSetting& readPref,
const repl::ReadConcernLevel& readConcernLevel,
const NamespaceString& nss,
diff --git a/src/mongo/s/client/sharding_connection_hook.cpp b/src/mongo/s/client/sharding_connection_hook.cpp
index 7085a290a0c..36cb24bf9fc 100644
--- a/src/mongo/s/client/sharding_connection_hook.cpp
+++ b/src/mongo/s/client/sharding_connection_hook.cpp
@@ -78,9 +78,9 @@ void ShardingConnectionHook::onCreate(DBClientBase* conn) {
});
}
conn->setRequestMetadataWriter(
- [this](OperationContext* txn, BSONObjBuilder* metadataBob, StringData hostStringData) {
+ [this](OperationContext* opCtx, BSONObjBuilder* metadataBob, StringData hostStringData) {
return _egressHook->writeRequestMetadata(
- _shardedConnections, txn, hostStringData, metadataBob);
+ _shardedConnections, opCtx, hostStringData, metadataBob);
});
diff --git a/src/mongo/s/client/version_manager.cpp b/src/mongo/s/client/version_manager.cpp
index 107579eeaab..299a89f5941 100644
--- a/src/mongo/s/client/version_manager.cpp
+++ b/src/mongo/s/client/version_manager.cpp
@@ -107,7 +107,7 @@ private:
/**
* Sends the setShardVersion command on the specified connection.
*/
-bool setShardVersion(OperationContext* txn,
+bool setShardVersion(OperationContext* opCtx,
DBClientBase* conn,
const string& ns,
const ConnectionString& configServer,
@@ -174,7 +174,7 @@ DBClientBase* getVersionable(DBClientBase* conn) {
* Eventually this should go completely away, but for now many commands rely on unversioned but
* mongos-specific behavior on mongod (auditing and replication information in commands)
*/
-bool initShardVersionEmptyNS(OperationContext* txn, DBClientBase* conn_in) {
+bool initShardVersionEmptyNS(OperationContext* opCtx, DBClientBase* conn_in) {
try {
// May throw if replica set primary is down
DBClientBase* const conn = getVersionable(conn_in);
@@ -187,7 +187,7 @@ bool initShardVersionEmptyNS(OperationContext* txn, DBClientBase* conn_in) {
}
BSONObj result;
- const bool ok = setShardVersion(txn,
+ const bool ok = setShardVersion(opCtx,
conn,
"",
grid.shardRegistry()->getConfigServerConnectionString(),
@@ -241,7 +241,7 @@ bool initShardVersionEmptyNS(OperationContext* txn, DBClientBase* conn_in) {
*
* @return true if we contacted the remote host
*/
-bool checkShardVersion(OperationContext* txn,
+bool checkShardVersion(OperationContext* opCtx,
DBClientBase* conn_in,
const string& ns,
shared_ptr<ChunkManager> refManager,
@@ -249,7 +249,7 @@ bool checkShardVersion(OperationContext* txn,
int tryNumber) {
// Empty namespaces are special - we require initialization but not versioning
if (ns.size() == 0) {
- return initShardVersionEmptyNS(txn, conn_in);
+ return initShardVersionEmptyNS(opCtx, conn_in);
}
DBClientBase* const conn = getVersionable(conn_in);
@@ -258,10 +258,10 @@ bool checkShardVersion(OperationContext* txn,
const NamespaceString nss(ns);
if (authoritative) {
- ScopedChunkManager::refreshAndGet(txn, nss);
+ ScopedChunkManager::refreshAndGet(opCtx, nss);
}
- auto scopedCMStatus = ScopedChunkManager::get(txn, nss);
+ auto scopedCMStatus = ScopedChunkManager::get(opCtx, nss);
if (!scopedCMStatus.isOK()) {
return false;
@@ -283,7 +283,7 @@ bool checkShardVersion(OperationContext* txn,
return false;
}
- const auto shardRegistry = Grid::get(txn)->shardRegistry();
+ const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
const auto shard = shardRegistry->getShardForHostNoReload(
uassertStatusOK(HostAndPort::parse(conn->getServerAddress())));
@@ -350,7 +350,7 @@ bool checkShardVersion(OperationContext* txn,
<< ", current chunk manager iteration is " << officialSequenceNumber;
BSONObj result;
- if (setShardVersion(txn,
+ if (setShardVersion(opCtx,
conn,
ns,
shardRegistry->getConfigServerConnectionString(),
@@ -375,7 +375,7 @@ bool checkShardVersion(OperationContext* txn,
if (!authoritative) {
// use the original connection and get a fresh versionable connection
// since conn can be invalidated (or worse, freed) after the failure
- checkShardVersion(txn, conn_in, ns, refManager, 1, tryNumber + 1);
+ checkShardVersion(opCtx, conn_in, ns, refManager, 1, tryNumber + 1);
return true;
}
@@ -384,10 +384,10 @@ bool checkShardVersion(OperationContext* txn,
warning() << "reloading full configuration for " << conf->name()
<< ", connection state indicates significant version changes";
- Grid::get(txn)->catalogCache()->invalidate(nss.db());
+ Grid::get(opCtx)->catalogCache()->invalidate(nss.db());
}
- conf->getChunkManager(txn, nss.ns(), true);
+ conf->getChunkManager(opCtx, nss.ns(), true);
}
const int maxNumTries = 7;
@@ -397,7 +397,7 @@ bool checkShardVersion(OperationContext* txn,
sleepmillis(10 * tryNumber);
// use the original connection and get a fresh versionable connection
// since conn can be invalidated (or worse, freed) after the failure
- checkShardVersion(txn, conn_in, ns, refManager, true, tryNumber + 1);
+ checkShardVersion(opCtx, conn_in, ns, refManager, true, tryNumber + 1);
return true;
}
@@ -426,20 +426,20 @@ bool VersionManager::isVersionableCB(DBClientBase* conn) {
return conn->type() == ConnectionString::MASTER || conn->type() == ConnectionString::SET;
}
-bool VersionManager::checkShardVersionCB(OperationContext* txn,
+bool VersionManager::checkShardVersionCB(OperationContext* opCtx,
DBClientBase* conn_in,
const string& ns,
bool authoritative,
int tryNumber) {
- return checkShardVersion(txn, conn_in, ns, nullptr, authoritative, tryNumber);
+ return checkShardVersion(opCtx, conn_in, ns, nullptr, authoritative, tryNumber);
}
-bool VersionManager::checkShardVersionCB(OperationContext* txn,
+bool VersionManager::checkShardVersionCB(OperationContext* opCtx,
ShardConnection* conn_in,
bool authoritative,
int tryNumber) {
return checkShardVersion(
- txn, conn_in->get(), conn_in->getNS(), conn_in->getManager(), authoritative, tryNumber);
+ opCtx, conn_in->get(), conn_in->getNS(), conn_in->getManager(), authoritative, tryNumber);
}
} // namespace mongo
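
The retry plumbing in checkShardVersion above is easy to miss across the hunks: after a failed setShardVersion it sleeps proportionally to the attempt number and recurses with tryNumber + 1, giving up after a fixed cap. A condensed sketch of that discipline, with a hypothetical helper standing in for the real round trip:

    // Sketch only: the real code recurses into checkShardVersion rather than looping,
    // and trySetShardVersion is a hypothetical stand-in for the setShardVersion call.
    const int maxNumTries = 7;
    for (int tryNumber = 1; tryNumber <= maxNumTries; ++tryNumber) {
        if (trySetShardVersion(opCtx, conn, ns)) {
            return true;
        }
        sleepmillis(10 * tryNumber);  // linear backoff before the next attempt
    }
    return false;
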
diff --git a/src/mongo/s/cluster_identity_loader.cpp b/src/mongo/s/cluster_identity_loader.cpp
index 741a280ab4c..37a63a5cee2 100644
--- a/src/mongo/s/cluster_identity_loader.cpp
+++ b/src/mongo/s/cluster_identity_loader.cpp
@@ -60,7 +60,7 @@ OID ClusterIdentityLoader::getClusterId() {
return _lastLoadResult.getValue();
}
-Status ClusterIdentityLoader::loadClusterId(OperationContext* txn,
+Status ClusterIdentityLoader::loadClusterId(OperationContext* opCtx,
const repl::ReadConcernLevel& readConcernLevel) {
stdx::unique_lock<stdx::mutex> lk(_mutex);
if (_initializationState == InitializationState::kInitialized) {
@@ -79,7 +79,7 @@ Status ClusterIdentityLoader::loadClusterId(OperationContext* txn,
_initializationState = InitializationState::kLoading;
lk.unlock();
- auto loadStatus = _fetchClusterIdFromConfig(txn, readConcernLevel);
+ auto loadStatus = _fetchClusterIdFromConfig(opCtx, readConcernLevel);
lk.lock();
invariant(_initializationState == InitializationState::kLoading);
@@ -94,9 +94,9 @@ Status ClusterIdentityLoader::loadClusterId(OperationContext* txn,
}
StatusWith<OID> ClusterIdentityLoader::_fetchClusterIdFromConfig(
- OperationContext* txn, const repl::ReadConcernLevel& readConcernLevel) {
- auto catalogClient = Grid::get(txn)->catalogClient(txn);
- auto loadResult = catalogClient->getConfigVersion(txn, readConcernLevel);
+ OperationContext* opCtx, const repl::ReadConcernLevel& readConcernLevel) {
+ auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+ auto loadResult = catalogClient->getConfigVersion(opCtx, readConcernLevel);
if (!loadResult.isOK()) {
return Status(loadResult.getStatus().code(),
str::stream() << "Error loading clusterID"
diff --git a/src/mongo/s/cluster_identity_loader.h b/src/mongo/s/cluster_identity_loader.h
index d34f5368850..c8871cb1c56 100644
--- a/src/mongo/s/cluster_identity_loader.h
+++ b/src/mongo/s/cluster_identity_loader.h
@@ -71,7 +71,7 @@ public:
* If another thread is already in the process of loading the cluster ID, concurrent calls will
* wait for that thread to finish and then return its results.
*/
- Status loadClusterId(OperationContext* txn, const repl::ReadConcernLevel& readConcernLevel);
+ Status loadClusterId(OperationContext* opCtx, const repl::ReadConcernLevel& readConcernLevel);
/**
* Called if the config.version document is rolled back. Notifies the ClusterIdentityLoader
@@ -90,7 +90,7 @@ private:
* Queries the config.version collection on the config server, extracts the cluster ID from
* the version document, and returns it.
*/
- StatusWith<OID> _fetchClusterIdFromConfig(OperationContext* txn,
+ StatusWith<OID> _fetchClusterIdFromConfig(OperationContext* opCtx,
const repl::ReadConcernLevel& readConcernLevel);
stdx::mutex _mutex;
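
The loadClusterId() hunks above encode a small hand-rolled once-loader: the mutex is dropped around the config-server fetch so concurrent callers can observe the kLoading state and wait, then reacquired before the result is published. A simplified sketch of that lock discipline, using the member names visible above:

    stdx::unique_lock<stdx::mutex> lk(_mutex);
    if (_initializationState == InitializationState::kInitialized) {
        return Status::OK();  // simplified: the real code reports the cached load status
    }
    _initializationState = InitializationState::kLoading;

    lk.unlock();  // never hold _mutex across the network round trip
    auto loadStatus = _fetchClusterIdFromConfig(opCtx, readConcernLevel);
    lk.lock();

    invariant(_initializationState == InitializationState::kLoading);
    // ... publish loadStatus into _lastLoadResult and update the state under the lock.
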
diff --git a/src/mongo/s/commands/chunk_manager_targeter.cpp b/src/mongo/s/commands/chunk_manager_targeter.cpp
index 9dfef97dfda..8296b0066a9 100644
--- a/src/mongo/s/commands/chunk_manager_targeter.cpp
+++ b/src/mongo/s/commands/chunk_manager_targeter.cpp
@@ -117,7 +117,7 @@ UpdateType getUpdateExprType(const BSONObj& updateExpr) {
* { _id : { $lt : 30 } } => false
* { foo : <anything> } => false
*/
-bool isExactIdQuery(OperationContext* txn, const CanonicalQuery& query, ChunkManager* manager) {
+bool isExactIdQuery(OperationContext* opCtx, const CanonicalQuery& query, ChunkManager* manager) {
auto shardKey = virtualIdShardKey.extractShardKeyFromQuery(query);
BSONElement idElt = shardKey["_id"];
@@ -288,8 +288,8 @@ ChunkManagerTargeter::ChunkManagerTargeter(const NamespaceString& nss, TargeterS
: _nss(nss), _needsTargetingRefresh(false), _stats(stats) {}
-Status ChunkManagerTargeter::init(OperationContext* txn) {
- auto scopedCMStatus = ScopedChunkManager::getOrCreate(txn, _nss);
+Status ChunkManagerTargeter::init(OperationContext* opCtx) {
+ auto scopedCMStatus = ScopedChunkManager::getOrCreate(opCtx, _nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
@@ -305,7 +305,7 @@ const NamespaceString& ChunkManagerTargeter::getNS() const {
return _nss;
}
-Status ChunkManagerTargeter::targetInsert(OperationContext* txn,
+Status ChunkManagerTargeter::targetInsert(OperationContext* opCtx,
const BSONObj& doc,
ShardEndpoint** endpoint) const {
BSONObj shardKey;
@@ -349,7 +349,7 @@ Status ChunkManagerTargeter::targetInsert(OperationContext* txn,
return Status::OK();
}
-Status ChunkManagerTargeter::targetUpdate(OperationContext* txn,
+Status ChunkManagerTargeter::targetUpdate(OperationContext* opCtx,
const BatchedUpdateDocument& updateDoc,
vector<ShardEndpoint*>* endpoints) const {
//
@@ -393,7 +393,7 @@ Status ChunkManagerTargeter::targetUpdate(OperationContext* txn,
if (updateType == UpdateType_OpStyle) {
// Target using the query
StatusWith<BSONObj> status =
- _manager->getShardKeyPattern().extractShardKeyFromQuery(txn, query);
+ _manager->getShardKeyPattern().extractShardKeyFromQuery(opCtx, query);
// Bad query
if (!status.isOK())
@@ -445,7 +445,7 @@ Status ChunkManagerTargeter::targetUpdate(OperationContext* txn,
if (!collation.isEmpty()) {
qr->setCollation(collation);
}
- auto cq = CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop());
+ auto cq = CanonicalQuery::canonicalize(opCtx, std::move(qr), ExtensionsCallbackNoop());
if (!cq.isOK()) {
return Status(cq.getStatus().code(),
str::stream() << "Could not parse update query " << updateDoc.getQuery()
@@ -453,7 +453,8 @@ Status ChunkManagerTargeter::targetUpdate(OperationContext* txn,
}
// Single (non-multi) updates must target a single shard or be exact-ID.
- if (_manager && !updateDoc.getMulti() && !isExactIdQuery(txn, *cq.getValue(), _manager.get())) {
+ if (_manager && !updateDoc.getMulti() &&
+ !isExactIdQuery(opCtx, *cq.getValue(), _manager.get())) {
return Status(ErrorCodes::ShardKeyNotFound,
str::stream()
<< "A single update on a sharded collection must contain an exact "
@@ -466,13 +467,13 @@ Status ChunkManagerTargeter::targetUpdate(OperationContext* txn,
}
if (updateType == UpdateType_OpStyle) {
- return targetQuery(txn, query, collation, endpoints);
+ return targetQuery(opCtx, query, collation, endpoints);
} else {
- return targetDoc(txn, updateExpr, collation, endpoints);
+ return targetDoc(opCtx, updateExpr, collation, endpoints);
}
}
-Status ChunkManagerTargeter::targetDelete(OperationContext* txn,
+Status ChunkManagerTargeter::targetDelete(OperationContext* opCtx,
const BatchedDeleteDocument& deleteDoc,
vector<ShardEndpoint*>* endpoints) const {
BSONObj shardKey;
@@ -486,7 +487,7 @@ Status ChunkManagerTargeter::targetDelete(OperationContext* txn,
// Get the shard key
StatusWith<BSONObj> status =
- _manager->getShardKeyPattern().extractShardKeyFromQuery(txn, deleteDoc.getQuery());
+ _manager->getShardKeyPattern().extractShardKeyFromQuery(opCtx, deleteDoc.getQuery());
// Bad query
if (!status.isOK())
@@ -516,7 +517,7 @@ Status ChunkManagerTargeter::targetDelete(OperationContext* txn,
if (!collation.isEmpty()) {
qr->setCollation(collation);
}
- auto cq = CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop());
+ auto cq = CanonicalQuery::canonicalize(opCtx, std::move(qr), ExtensionsCallbackNoop());
if (!cq.isOK()) {
return Status(cq.getStatus().code(),
str::stream() << "Could not parse delete query " << deleteDoc.getQuery()
@@ -525,7 +526,7 @@ Status ChunkManagerTargeter::targetDelete(OperationContext* txn,
// Single deletes must target a single shard or be exact-ID.
if (_manager && deleteDoc.getLimit() == 1 &&
- !isExactIdQuery(txn, *cq.getValue(), _manager.get())) {
+ !isExactIdQuery(opCtx, *cq.getValue(), _manager.get())) {
return Status(ErrorCodes::ShardKeyNotFound,
str::stream()
<< "A single delete on a sharded collection must contain an exact "
@@ -537,19 +538,19 @@ Status ChunkManagerTargeter::targetDelete(OperationContext* txn,
<< _manager->getShardKeyPattern().toString());
}
- return targetQuery(txn, deleteDoc.getQuery(), collation, endpoints);
+ return targetQuery(opCtx, deleteDoc.getQuery(), collation, endpoints);
}
-Status ChunkManagerTargeter::targetDoc(OperationContext* txn,
+Status ChunkManagerTargeter::targetDoc(OperationContext* opCtx,
const BSONObj& doc,
const BSONObj& collation,
vector<ShardEndpoint*>* endpoints) const {
// NOTE: This is weird and fragile, but it's the way our language works right now -
// documents are either A) invalid or B) valid equality queries over themselves.
- return targetQuery(txn, doc, collation, endpoints);
+ return targetQuery(opCtx, doc, collation, endpoints);
}
-Status ChunkManagerTargeter::targetQuery(OperationContext* txn,
+Status ChunkManagerTargeter::targetQuery(OperationContext* opCtx,
const BSONObj& query,
const BSONObj& collation,
vector<ShardEndpoint*>* endpoints) const {
@@ -562,7 +563,7 @@ Status ChunkManagerTargeter::targetQuery(OperationContext* txn,
set<ShardId> shardIds;
if (_manager) {
try {
- _manager->getShardIdsForQuery(txn, query, collation, &shardIds);
+ _manager->getShardIdsForQuery(opCtx, query, collation, &shardIds);
} catch (const DBException& ex) {
return ex.toStatus();
}
@@ -671,7 +672,7 @@ void ChunkManagerTargeter::noteCouldNotTarget() {
_needsTargetingRefresh = true;
}
-Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* txn, bool* wasChanged) {
+Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* opCtx, bool* wasChanged) {
bool dummy;
if (!wasChanged) {
wasChanged = &dummy;
@@ -694,7 +695,7 @@ Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* txn, bool* wasCha
shared_ptr<ChunkManager> lastManager = _manager;
shared_ptr<Shard> lastPrimary = _primary;
- auto scopedCMStatus = ScopedChunkManager::getOrCreate(txn, _nss);
+ auto scopedCMStatus = ScopedChunkManager::getOrCreate(opCtx, _nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
@@ -724,7 +725,7 @@ Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* txn, bool* wasCha
// If didn't already refresh the targeting information, refresh it
if (!alreadyRefreshed) {
// To match previous behavior, we just need an incremental refresh here
- return refreshNow(txn, RefreshType_RefreshChunkManager);
+ return refreshNow(opCtx, RefreshType_RefreshChunkManager);
}
*wasChanged = isMetadataDifferent(lastManager, lastPrimary, _manager, _primary);
@@ -740,10 +741,10 @@ Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* txn, bool* wasCha
if (result == CompareResult_Unknown) {
// Our current shard versions aren't all comparable to the old versions, maybe drop
- return refreshNow(txn, RefreshType_ReloadDatabase);
+ return refreshNow(opCtx, RefreshType_ReloadDatabase);
} else if (result == CompareResult_LT) {
// Our current shard versions are less than the remote versions, but no drop
- return refreshNow(txn, RefreshType_RefreshChunkManager);
+ return refreshNow(opCtx, RefreshType_RefreshChunkManager);
}
*wasChanged = isMetadataDifferent(lastManager, lastPrimary, _manager, _primary);
@@ -755,17 +756,17 @@ Status ChunkManagerTargeter::refreshIfNeeded(OperationContext* txn, bool* wasCha
return Status::OK();
}
-Status ChunkManagerTargeter::refreshNow(OperationContext* txn, RefreshType refreshType) {
+Status ChunkManagerTargeter::refreshNow(OperationContext* opCtx, RefreshType refreshType) {
if (refreshType == RefreshType_ReloadDatabase) {
- Grid::get(txn)->catalogCache()->invalidate(_nss.db().toString());
+ Grid::get(opCtx)->catalogCache()->invalidate(_nss.db().toString());
}
// Try not to spam the configs
refreshBackoff();
- ScopedChunkManager::refreshAndGet(txn, _nss);
+ ScopedChunkManager::refreshAndGet(opCtx, _nss);
- auto scopedCMStatus = ScopedChunkManager::get(txn, _nss);
+ auto scopedCMStatus = ScopedChunkManager::get(opCtx, _nss);
if (!scopedCMStatus.isOK()) {
return scopedCMStatus.getStatus();
}
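
The ShardKeyNotFound branches above lean on isExactIdQuery(): a non-multi update or limit-1 delete that cannot be targeted by shard key is still allowed when its filter pins down _id by equality. A few hedged examples of filters and how that check treats them, following the behavior documented for isExactIdQuery earlier in this file:

    BSONObj byIdEquality = BSON("_id" << 3);                  // exact-ID: targetable without the shard key
    BSONObj byIdRange    = BSON("_id" << BSON("$lt" << 30));  // range on _id: not exact-ID
    BSONObj byOtherField = BSON("foo" << "bar");              // no _id at all: not exact-ID
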
diff --git a/src/mongo/s/commands/chunk_manager_targeter.h b/src/mongo/s/commands/chunk_manager_targeter.h
index 9c8f136dad8..049cdb8d858 100644
--- a/src/mongo/s/commands/chunk_manager_targeter.h
+++ b/src/mongo/s/commands/chunk_manager_targeter.h
@@ -69,20 +69,22 @@ public:
*
* Returns !OK if the information could not be initialized.
*/
- Status init(OperationContext* txn);
+ Status init(OperationContext* opCtx);
const NamespaceString& getNS() const;
// Returns ShardKeyNotFound if document does not have a full shard key.
- Status targetInsert(OperationContext* txn, const BSONObj& doc, ShardEndpoint** endpoint) const;
+ Status targetInsert(OperationContext* opCtx,
+ const BSONObj& doc,
+ ShardEndpoint** endpoint) const;
// Returns ShardKeyNotFound if the update can't be targeted without a shard key.
- Status targetUpdate(OperationContext* txn,
+ Status targetUpdate(OperationContext* opCtx,
const BatchedUpdateDocument& updateDoc,
std::vector<ShardEndpoint*>* endpoints) const;
// Returns ShardKeyNotFound if the delete can't be targeted without a shard key.
- Status targetDelete(OperationContext* txn,
+ Status targetDelete(OperationContext* opCtx,
const BatchedDeleteDocument& deleteDoc,
std::vector<ShardEndpoint*>* endpoints) const;
@@ -103,7 +105,7 @@ public:
*
* Also see NSTargeter::refreshIfNeeded().
*/
- Status refreshIfNeeded(OperationContext* txn, bool* wasChanged);
+ Status refreshIfNeeded(OperationContext* opCtx, bool* wasChanged);
private:
// Different ways we can refresh metadata
@@ -120,7 +122,7 @@ private:
/**
* Performs an actual refresh from the config server.
*/
- Status refreshNow(OperationContext* txn, RefreshType refreshType);
+ Status refreshNow(OperationContext* opCtx, RefreshType refreshType);
/**
* Returns a vector of ShardEndpoints where a document might need to be placed.
@@ -129,7 +131,7 @@ private:
*
* If 'collation' is empty, we use the collection default collation for targeting.
*/
- Status targetDoc(OperationContext* txn,
+ Status targetDoc(OperationContext* opCtx,
const BSONObj& doc,
const BSONObj& collation,
std::vector<ShardEndpoint*>* endpoints) const;
@@ -141,7 +143,7 @@ private:
*
* If 'collation' is empty, we use the collection default collation for targeting.
*/
- Status targetQuery(OperationContext* txn,
+ Status targetQuery(OperationContext* opCtx,
const BSONObj& query,
const BSONObj& collation,
std::vector<ShardEndpoint*>* endpoints) const;
diff --git a/src/mongo/s/commands/cluster_add_shard_cmd.cpp b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
index e2cb0e4f117..c54804992d2 100644
--- a/src/mongo/s/commands/cluster_add_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_add_shard_cmd.cpp
@@ -80,7 +80,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -88,9 +88,9 @@ public:
BSONObjBuilder& result) {
auto parsedRequest = uassertStatusOK(AddShardRequest::parseFromMongosCommand(cmdObj));
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto cmdResponseStatus = uassertStatusOK(
- configShard->runCommandWithFixedRetryAttempts(txn,
+ configShard->runCommandWithFixedRetryAttempts(opCtx,
kPrimaryOnlyReadPreference,
"admin",
parsedRequest.toCommandForConfig(),
@@ -103,8 +103,8 @@ public:
result << "shardAdded" << shardAdded;
// Ensure the added shard is visible to this process.
- auto shardRegistry = Grid::get(txn)->shardRegistry();
- if (!shardRegistry->getShard(txn, shardAdded).isOK()) {
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
+ if (!shardRegistry->getShard(opCtx, shardAdded).isOK()) {
return appendCommandStatus(result,
{ErrorCodes::OperationFailed,
"Could not find shard metadata for shard after adding it. "
diff --git a/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp b/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp
index 6ab4e01cd54..43f2b4a3539 100644
--- a/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp
+++ b/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp
@@ -96,7 +96,7 @@ public:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -108,9 +108,9 @@ public:
parsedRequest.appendAsConfigCommand(&cmdBuilder);
cmdBuilder.append("writeConcern", kMajorityWriteConcern.toBSON());
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto cmdResponseStatus = uassertStatusOK(
- configShard->runCommandWithFixedRetryAttempts(txn,
+ configShard->runCommandWithFixedRetryAttempts(opCtx,
kPrimaryOnlyReadPreference,
"admin",
cmdBuilder.obj(),
diff --git a/src/mongo/s/commands/cluster_aggregate.cpp b/src/mongo/s/commands/cluster_aggregate.cpp
index 07bf5dcb5c9..a6887ea0498 100644
--- a/src/mongo/s/commands/cluster_aggregate.cpp
+++ b/src/mongo/s/commands/cluster_aggregate.cpp
@@ -61,12 +61,13 @@
namespace mongo {
-Status ClusterAggregate::runAggregate(OperationContext* txn,
+Status ClusterAggregate::runAggregate(OperationContext* opCtx,
const Namespaces& namespaces,
BSONObj cmdObj,
int options,
BSONObjBuilder* result) {
- auto scopedShardDbStatus = ScopedShardDatabase::getExisting(txn, namespaces.executionNss.db());
+ auto scopedShardDbStatus =
+ ScopedShardDatabase::getExisting(opCtx, namespaces.executionNss.db());
if (!scopedShardDbStatus.isOK()) {
appendEmptyResultSet(
*result, scopedShardDbStatus.getStatus(), namespaces.requestedNss.ns());
@@ -96,21 +97,21 @@ Status ClusterAggregate::runAggregate(OperationContext* txn,
}
if (!conf->isSharded(namespaces.executionNss.ns())) {
- return aggPassthrough(txn, namespaces, conf, cmdObj, result, options);
+ return aggPassthrough(opCtx, namespaces, conf, cmdObj, result, options);
}
- auto chunkMgr = conf->getChunkManager(txn, namespaces.executionNss.ns());
+ auto chunkMgr = conf->getChunkManager(opCtx, namespaces.executionNss.ns());
std::unique_ptr<CollatorInterface> collation;
if (!request.getValue().getCollation().isEmpty()) {
- collation = uassertStatusOK(CollatorFactoryInterface::get(txn->getServiceContext())
+ collation = uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(request.getValue().getCollation()));
} else if (chunkMgr->getDefaultCollator()) {
collation = chunkMgr->getDefaultCollator()->clone();
}
boost::intrusive_ptr<ExpressionContext> mergeCtx = new ExpressionContext(
- txn, request.getValue(), std::move(collation), std::move(resolvedNamespaces));
+ opCtx, request.getValue(), std::move(collation), std::move(resolvedNamespaces));
mergeCtx->inRouter = true;
// explicitly *not* setting mergeCtx->tempDir
@@ -127,7 +128,7 @@ Status ClusterAggregate::runAggregate(OperationContext* txn,
const bool singleShard = [&]() {
BSONObj firstMatchQuery = pipeline.getValue()->getInitialQuery();
BSONObj shardKeyMatches = uassertStatusOK(
- chunkMgr->getShardKeyPattern().extractShardKeyFromQuery(txn, firstMatchQuery));
+ chunkMgr->getShardKeyPattern().extractShardKeyFromQuery(opCtx, firstMatchQuery));
if (shardKeyMatches.isEmpty()) {
return false;
@@ -176,7 +177,7 @@ Status ClusterAggregate::runAggregate(OperationContext* txn,
// Run the command on the shards
// TODO need to make sure cursors are killed if a retry is needed
std::vector<Strategy::CommandResult> shardResults;
- Strategy::commandOp(txn,
+ Strategy::commandOp(opCtx,
namespaces.executionNss.db().toString(),
shardedCommand,
options,
@@ -210,14 +211,14 @@ Status ClusterAggregate::runAggregate(OperationContext* txn,
if (!needSplit) {
invariant(shardResults.size() == 1);
invariant(shardResults[0].target.getServers().size() == 1);
- auto executorPool = Grid::get(txn)->getExecutorPool();
+ auto executorPool = Grid::get(opCtx)->getExecutorPool();
const BSONObj reply =
- uassertStatusOK(storePossibleCursor(txn,
+ uassertStatusOK(storePossibleCursor(opCtx,
shardResults[0].target.getServers()[0],
shardResults[0].result,
namespaces.requestedNss,
executorPool->getArbitraryExecutor(),
- Grid::get(txn)->getCursorManager()));
+ Grid::get(opCtx)->getCursorManager()));
result->appendElements(reply);
return getStatusFromCommandResult(reply);
}
@@ -258,17 +259,17 @@ Status ClusterAggregate::runAggregate(OperationContext* txn,
// Run merging command on random shard, unless a stage needs the primary shard. Need to use
// ShardConnection so that the merging mongod is sent the config servers on connection init.
- auto& prng = txn->getClient()->getPrng();
+ auto& prng = opCtx->getClient()->getPrng();
const auto& mergingShardId =
(needPrimaryShardMerger || internalQueryAlwaysMergeOnPrimaryShard.load())
? conf->getPrimaryId()
: shardResults[prng.nextInt32(shardResults.size())].shardTargetId;
const auto mergingShard =
- uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, mergingShardId));
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, mergingShardId));
ShardConnection conn(mergingShard->getConnString(), outputNsOrEmpty);
BSONObj mergedResults =
- aggRunCommand(txn, conn.get(), namespaces, mergeCmd.freeze().toBson(), options);
+ aggRunCommand(opCtx, conn.get(), namespaces, mergeCmd.freeze().toBson(), options);
conn.done();
if (auto wcErrorElem = mergedResults["writeConcernError"]) {
@@ -385,7 +386,7 @@ void ClusterAggregate::killAllCursors(const std::vector<Strategy::CommandResult>
}
}
-BSONObj ClusterAggregate::aggRunCommand(OperationContext* txn,
+BSONObj ClusterAggregate::aggRunCommand(OperationContext* opCtx,
DBClientBase* conn,
const Namespaces& namespaces,
BSONObj cmd,
@@ -413,30 +414,30 @@ BSONObj ClusterAggregate::aggRunCommand(OperationContext* txn,
throw RecvStaleConfigException("command failed because of stale config", result);
}
- auto executorPool = Grid::get(txn)->getExecutorPool();
- result = uassertStatusOK(storePossibleCursor(txn,
+ auto executorPool = Grid::get(opCtx)->getExecutorPool();
+ result = uassertStatusOK(storePossibleCursor(opCtx,
HostAndPort(cursor->originalHost()),
result,
namespaces.requestedNss,
executorPool->getArbitraryExecutor(),
- Grid::get(txn)->getCursorManager()));
+ Grid::get(opCtx)->getCursorManager()));
return result;
}
-Status ClusterAggregate::aggPassthrough(OperationContext* txn,
+Status ClusterAggregate::aggPassthrough(OperationContext* opCtx,
const Namespaces& namespaces,
DBConfig* conf,
BSONObj cmdObj,
BSONObjBuilder* out,
int queryOptions) {
// Temporary hack. See comment on declaration for details.
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, conf->getPrimaryId());
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
ShardConnection conn(shardStatus.getValue()->getConnString(), "");
- BSONObj result = aggRunCommand(txn, conn.get(), namespaces, cmdObj, queryOptions);
+ BSONObj result = aggRunCommand(opCtx, conn.get(), namespaces, cmdObj, queryOptions);
conn.done();
// First append the properly constructed writeConcernError. It will then be skipped
@@ -472,7 +473,8 @@ Status ClusterAggregate::aggPassthrough(OperationContext* txn,
Namespaces nsStruct;
nsStruct.requestedNss = namespaces.requestedNss;
nsStruct.executionNss = resolvedView.getNamespace();
- return ClusterAggregate::runAggregate(txn, nsStruct, aggCmd.getValue(), queryOptions, out);
+ return ClusterAggregate::runAggregate(
+ opCtx, nsStruct, aggCmd.getValue(), queryOptions, out);
}
return getStatusFromCommandResult(result);
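
A design note on the merge step above: unless a pipeline stage (or the internalQueryAlwaysMergeOnPrimaryShard knob) forces the primary shard, mongos picks the merging shard pseudo-randomly from the shards that produced results, which spreads merge work instead of hot-spotting the primary. Condensed from the hunks above:

    auto& prng = opCtx->getClient()->getPrng();
    const auto& mergingShardId =
        (needPrimaryShardMerger || internalQueryAlwaysMergeOnPrimaryShard.load())
        ? conf->getPrimaryId()                                               // a stage needs the primary
        : shardResults[prng.nextInt32(shardResults.size())].shardTargetId;   // otherwise spread the load

    const auto mergingShard =
        uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, mergingShardId));
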
diff --git a/src/mongo/s/commands/cluster_aggregate.h b/src/mongo/s/commands/cluster_aggregate.h
index da8e7bb46bc..b0fdd5d7375 100644
--- a/src/mongo/s/commands/cluster_aggregate.h
+++ b/src/mongo/s/commands/cluster_aggregate.h
@@ -63,7 +63,7 @@ public:
* Executes an aggregation command. 'cmdObj' specifies the aggregation to run. Fills in 'result'
* with the command response.
*/
- static Status runAggregate(OperationContext* txn,
+ static Status runAggregate(OperationContext* opCtx,
const Namespaces& namespaces,
BSONObj cmdObj,
int options,
@@ -82,13 +82,13 @@ private:
// could be different from conn->getServerAddress() for connections that map to
// multiple servers such as for replica sets. These also take care of registering
// returned cursors.
- static BSONObj aggRunCommand(OperationContext* txn,
+ static BSONObj aggRunCommand(OperationContext* opCtx,
DBClientBase* conn,
const Namespaces& namespaces,
BSONObj cmd,
int queryOptions);
- static Status aggPassthrough(OperationContext* txn,
+ static Status aggPassthrough(OperationContext* opCtx,
const Namespaces& namespaces,
DBConfig* conf,
BSONObj cmd,
diff --git a/src/mongo/s/commands/cluster_apply_ops_cmd.cpp b/src/mongo/s/commands/cluster_apply_ops_cmd.cpp
index c8928784422..a0fc658a5dd 100644
--- a/src/mongo/s/commands/cluster_apply_ops_cmd.cpp
+++ b/src/mongo/s/commands/cluster_apply_ops_cmd.cpp
@@ -50,13 +50,13 @@ public:
return true;
}
- Status checkAuthForOperation(OperationContext* txn,
+ Status checkAuthForOperation(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj) override {
- return checkAuthForApplyOpsCommand(txn, dbname, cmdObj);
+ return checkAuthForApplyOpsCommand(opCtx, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbName,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_available_query_options_cmd.cpp b/src/mongo/s/commands/cluster_available_query_options_cmd.cpp
index 0eed4f14c9b..1dcffe67fb4 100644
--- a/src/mongo/s/commands/cluster_available_query_options_cmd.cpp
+++ b/src/mongo/s/commands/cluster_available_query_options_cmd.cpp
@@ -53,7 +53,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_commands_common.cpp b/src/mongo/s/commands/cluster_commands_common.cpp
index b4035da68c9..3ad8c373b9a 100644
--- a/src/mongo/s/commands/cluster_commands_common.cpp
+++ b/src/mongo/s/commands/cluster_commands_common.cpp
@@ -51,13 +51,13 @@ using std::string;
namespace {
-bool forceRemoteCheckShardVersionCB(OperationContext* txn, const string& ns) {
+bool forceRemoteCheckShardVersionCB(OperationContext* opCtx, const string& ns) {
const NamespaceString nss(ns);
// This will force the database catalog entry to be reloaded
- Grid::get(txn)->catalogCache()->invalidate(nss.db());
+ Grid::get(opCtx)->catalogCache()->invalidate(nss.db());
- auto scopedCMStatus = ScopedChunkManager::get(txn, nss);
+ auto scopedCMStatus = ScopedChunkManager::get(opCtx, nss);
if (!scopedCMStatus.isOK()) {
return false;
}
@@ -113,7 +113,7 @@ void Future::CommandResult::init() {
}
}
-bool Future::CommandResult::join(OperationContext* txn, int maxRetries) {
+bool Future::CommandResult::join(OperationContext* opCtx, int maxRetries) {
if (_done) {
return _ok;
}
@@ -155,7 +155,7 @@ bool Future::CommandResult::join(OperationContext* txn, int maxRetries) {
}
if (i >= maxRetries / 2) {
- if (!forceRemoteCheckShardVersionCB(txn, staleNS)) {
+ if (!forceRemoteCheckShardVersionCB(opCtx, staleNS)) {
error() << "Future::spawnCommand (part 2) no config detected"
<< causedBy(redact(e));
throw e;
@@ -169,7 +169,7 @@ bool Future::CommandResult::join(OperationContext* txn, int maxRetries) {
<< "for lazy command " << redact(_cmd) << ", could not refresh "
<< staleNS;
} else {
- versionManager.checkShardVersionCB(txn, _conn, staleNS, false, 1);
+ versionManager.checkShardVersionCB(opCtx, _conn, staleNS, false, 1);
}
LOG(i > 1 ? 0 : 1) << "retrying lazy command" << causedBy(redact(e));
@@ -242,13 +242,13 @@ bool appendEmptyResultSet(BSONObjBuilder& result, Status status, const std::stri
return Command::appendCommandStatus(result, status);
}
-std::vector<NamespaceString> getAllShardedCollectionsForDb(OperationContext* txn,
+std::vector<NamespaceString> getAllShardedCollectionsForDb(OperationContext* opCtx,
StringData dbName) {
const auto dbNameStr = dbName.toString();
std::vector<CollectionType> collectionsOnConfig;
- uassertStatusOK(Grid::get(txn)->catalogClient(txn)->getCollections(
- txn, &dbNameStr, &collectionsOnConfig, nullptr));
+ uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->getCollections(
+ opCtx, &dbNameStr, &collectionsOnConfig, nullptr));
std::vector<NamespaceString> collectionsToReturn;
for (const auto& coll : collectionsOnConfig) {
diff --git a/src/mongo/s/commands/cluster_commands_common.h b/src/mongo/s/commands/cluster_commands_common.h
index 819fd8738ea..7d6465bc400 100644
--- a/src/mongo/s/commands/cluster_commands_common.h
+++ b/src/mongo/s/commands/cluster_commands_common.h
@@ -73,7 +73,7 @@ public:
blocks until command is done
returns ok()
*/
- bool join(OperationContext* txn, int maxRetries = 1);
+ bool join(OperationContext* opCtx, int maxRetries = 1);
private:
CommandResult(const std::string& server,
@@ -137,7 +137,7 @@ bool appendEmptyResultSet(BSONObjBuilder& result, Status status, const std::stri
*
* Throws exception on errors.
*/
-std::vector<NamespaceString> getAllShardedCollectionsForDb(OperationContext* txn,
+std::vector<NamespaceString> getAllShardedCollectionsForDb(OperationContext* opCtx,
StringData dbName);
} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_control_balancer_cmd.cpp b/src/mongo/s/commands/cluster_control_balancer_cmd.cpp
index 39da91dd362..b1dabe7ad8c 100644
--- a/src/mongo/s/commands/cluster_control_balancer_cmd.cpp
+++ b/src/mongo/s/commands/cluster_control_balancer_cmd.cpp
@@ -77,15 +77,15 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) override {
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto cmdResponse = uassertStatusOK(
- configShard->runCommandWithFixedRetryAttempts(txn,
+ configShard->runCommandWithFixedRetryAttempts(opCtx,
kPrimaryOnlyReadPreference,
"admin",
BSON(_configsvrCommandName << 1),
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index ec2798bb62e..2fcf11086b9 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -100,7 +100,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -168,7 +168,7 @@ public:
}
vector<Strategy::CommandResult> countResult;
- Strategy::commandOp(txn,
+ Strategy::commandOp(opCtx,
dbname,
countCmdBuilder.done(),
options,
@@ -198,7 +198,7 @@ public:
BSONObjBuilder aggResult;
Command::findCommand("aggregate")
- ->run(txn, dbname, aggCmd.getValue(), options, errmsg, aggResult);
+ ->run(opCtx, dbname, aggCmd.getValue(), options, errmsg, aggResult);
result.resetToEmpty();
ViewResponseFormatter formatter(aggResult.obj());
@@ -247,7 +247,7 @@ public:
return true;
}
- virtual Status explain(OperationContext* txn,
+ virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -285,7 +285,7 @@ public:
Timer timer;
vector<Strategy::CommandResult> shardResults;
- Strategy::commandOp(txn,
+ Strategy::commandOp(opCtx,
dbname,
explainCmdBob.obj(),
options,
@@ -316,7 +316,7 @@ public:
std::string errMsg;
if (Command::findCommand("aggregate")
- ->run(txn, dbname, aggCmd.getValue(), 0, errMsg, *out)) {
+ ->run(opCtx, dbname, aggCmd.getValue(), 0, errMsg, *out)) {
return Status::OK();
}
@@ -326,7 +326,7 @@ public:
const char* mongosStageName = ClusterExplain::getStageNameForReadOp(shardResults, cmdObj);
return ClusterExplain::buildExplainResult(
- txn, shardResults, mongosStageName, millisElapsed, out);
+ opCtx, shardResults, mongosStageName, millisElapsed, out);
}
} clusterCountCmd;
diff --git a/src/mongo/s/commands/cluster_drop_cmd.cpp b/src/mongo/s/commands/cluster_drop_cmd.cpp
index 7f611f543f8..2c44a5a1dbc 100644
--- a/src/mongo/s/commands/cluster_drop_cmd.cpp
+++ b/src/mongo/s/commands/cluster_drop_cmd.cpp
@@ -72,7 +72,7 @@ public:
out->push_back(Privilege(parseResourcePattern(dbname, cmdObj), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -80,7 +80,7 @@ public:
BSONObjBuilder& result) override {
const NamespaceString nss(parseNsCollectionRequired(dbname, cmdObj));
- auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, dbname);
+ auto scopedDbStatus = ScopedShardDatabase::getExisting(opCtx, dbname);
if (scopedDbStatus == ErrorCodes::NamespaceNotFound) {
return true;
}
@@ -90,9 +90,9 @@ public:
auto const db = scopedDbStatus.getValue().db();
if (!db->isSharded(nss.ns())) {
- _dropUnshardedCollectionFromShard(txn, db->getPrimaryId(), nss, &result);
+ _dropUnshardedCollectionFromShard(opCtx, db->getPrimaryId(), nss, &result);
} else {
- uassertStatusOK(Grid::get(txn)->catalogClient(txn)->dropCollection(txn, nss));
+ uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->dropCollection(opCtx, nss));
db->markNSNotSharded(nss.ns());
}
@@ -104,13 +104,13 @@ private:
* Sends the 'drop' command for the specified collection to the specified shard. Throws
* DBException on failure.
*/
- static void _dropUnshardedCollectionFromShard(OperationContext* txn,
+ static void _dropUnshardedCollectionFromShard(OperationContext* opCtx,
const ShardId& shardId,
const NamespaceString& nss,
BSONObjBuilder* result) {
- const auto shardRegistry = Grid::get(txn)->shardRegistry();
+ const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
- const auto dropCommandBSON = [shardRegistry, txn, &shardId, &nss] {
+ const auto dropCommandBSON = [shardRegistry, opCtx, &shardId, &nss] {
BSONObjBuilder builder;
builder.append("drop", nss.coll());
@@ -121,17 +121,17 @@ private:
ChunkVersion::UNSHARDED().appendForCommands(&builder);
}
- if (!txn->getWriteConcern().usedDefault) {
+ if (!opCtx->getWriteConcern().usedDefault) {
builder.append(WriteConcernOptions::kWriteConcernField,
- txn->getWriteConcern().toBSON());
+ opCtx->getWriteConcern().toBSON());
}
return builder.obj();
}();
- const auto shard = uassertStatusOK(shardRegistry->getShard(txn, shardId));
+ const auto shard = uassertStatusOK(shardRegistry->getShard(opCtx, shardId));
auto cmdDropResult = uassertStatusOK(shard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
nss.db().toString(),
dropCommandBSON,
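
Both drop commands in this area build their outbound command object with an immediately-invoked lambda, which keeps the conditional pieces (UNSHARDED version, explicit writeConcern passthrough) inside a single const initializer. A hedged sketch of that idiom, trimmed to the writeConcern part:

    const BSONObj dropCommandBSON = [opCtx, &nss] {
        BSONObjBuilder builder;
        builder.append("drop", nss.coll());
        // Forward an explicit client writeConcern; otherwise let the shard apply its default.
        if (!opCtx->getWriteConcern().usedDefault) {
            builder.append(WriteConcernOptions::kWriteConcernField,
                           opCtx->getWriteConcern().toBSON());
        }
        return builder.obj();
    }();
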
diff --git a/src/mongo/s/commands/cluster_drop_database_cmd.cpp b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
index f227b4aa89a..f86cf073273 100644
--- a/src/mongo/s/commands/cluster_drop_database_cmd.cpp
+++ b/src/mongo/s/commands/cluster_drop_database_cmd.cpp
@@ -72,7 +72,7 @@ public:
out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -86,17 +86,17 @@ public:
"have to pass 1 as db parameter",
cmdObj.firstElement().isNumber() && cmdObj.firstElement().number() == 1);
- auto const catalogClient = Grid::get(txn)->catalogClient(txn);
+ auto const catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
// Lock the database globally to prevent conflicts with simultaneous database
// creation/modification.
auto scopedDistLock = uassertStatusOK(catalogClient->getDistLockManager()->lock(
- txn, dbname, "dropDatabase", DistLockManager::kDefaultLockTimeout));
+ opCtx, dbname, "dropDatabase", DistLockManager::kDefaultLockTimeout));
// Refresh the database metadata so it kicks off a full reload
- Grid::get(txn)->catalogCache()->invalidate(dbname);
+ Grid::get(opCtx)->catalogCache()->invalidate(dbname);
- auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, dbname);
+ auto scopedDbStatus = ScopedShardDatabase::getExisting(opCtx, dbname);
if (scopedDbStatus == ErrorCodes::NamespaceNotFound) {
result.append("info", "database does not exist");
@@ -105,7 +105,7 @@ public:
uassertStatusOK(scopedDbStatus.getStatus());
- catalogClient->logChange(txn,
+ catalogClient->logChange(opCtx,
"dropDatabase.start",
dbname,
BSONObj(),
@@ -114,27 +114,27 @@ public:
auto const db = scopedDbStatus.getValue().db();
// Drop the database's collections from metadata
- for (const auto& nss : getAllShardedCollectionsForDb(txn, dbname)) {
- uassertStatusOK(catalogClient->dropCollection(txn, nss));
+ for (const auto& nss : getAllShardedCollectionsForDb(opCtx, dbname)) {
+ uassertStatusOK(catalogClient->dropCollection(opCtx, nss));
db->markNSNotSharded(nss.ns());
}
// Drop the database from the primary shard first
- _dropDatabaseFromShard(txn, db->getPrimaryId(), dbname);
+ _dropDatabaseFromShard(opCtx, db->getPrimaryId(), dbname);
// Drop the database from each of the remaining shards
{
std::vector<ShardId> allShardIds;
- Grid::get(txn)->shardRegistry()->getAllShardIds(&allShardIds);
+ Grid::get(opCtx)->shardRegistry()->getAllShardIds(&allShardIds);
for (const ShardId& shardId : allShardIds) {
- _dropDatabaseFromShard(txn, shardId, dbname);
+ _dropDatabaseFromShard(opCtx, shardId, dbname);
}
}
// Remove the database entry from the metadata
Status status =
- catalogClient->removeConfigDocuments(txn,
+ catalogClient->removeConfigDocuments(opCtx,
DatabaseType::ConfigNS,
BSON(DatabaseType::name(dbname)),
ShardingCatalogClient::kMajorityWriteConcern);
@@ -146,10 +146,10 @@ public:
}
// Invalidate the database so the next access will do a full reload
- Grid::get(txn)->catalogCache()->invalidate(dbname);
+ Grid::get(opCtx)->catalogCache()->invalidate(dbname);
catalogClient->logChange(
- txn, "dropDatabase", dbname, BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
+ opCtx, "dropDatabase", dbname, BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
result.append("dropped", dbname);
return true;
@@ -160,24 +160,25 @@ private:
* Sends the 'dropDatabase' command for the specified database to the specified shard. Throws
* DBException on failure.
*/
- static void _dropDatabaseFromShard(OperationContext* txn,
+ static void _dropDatabaseFromShard(OperationContext* opCtx,
const ShardId& shardId,
const std::string& dbName) {
- const auto dropDatabaseCommandBSON = [txn, &dbName] {
+ const auto dropDatabaseCommandBSON = [opCtx, &dbName] {
BSONObjBuilder builder;
builder.append("dropDatabase", 1);
- if (!txn->getWriteConcern().usedDefault) {
+ if (!opCtx->getWriteConcern().usedDefault) {
builder.append(WriteConcernOptions::kWriteConcernField,
- txn->getWriteConcern().toBSON());
+ opCtx->getWriteConcern().toBSON());
}
return builder.obj();
}();
- const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
auto cmdDropDatabaseResult = uassertStatusOK(shard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
dbName,
dropDatabaseCommandBSON,
diff --git a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
index 6f8338a0ae8..1db7ea7ef03 100644
--- a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
+++ b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp
@@ -87,7 +87,7 @@ public:
return cmdObj.firstElement().str();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname_unused,
BSONObj& cmdObj,
int options,
@@ -105,11 +105,11 @@ public:
return false;
}
- uassertStatusOK(Grid::get(txn)->catalogClient(txn)->enableSharding(txn, dbname));
+ uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->enableSharding(opCtx, dbname));
audit::logEnableSharding(Client::getCurrent(), dbname);
// Make sure to force update of any stale metadata
- Grid::get(txn)->catalogCache()->invalidate(dbname);
+ Grid::get(opCtx)->catalogCache()->invalidate(dbname);
return true;
}
diff --git a/src/mongo/s/commands/cluster_explain.cpp b/src/mongo/s/commands/cluster_explain.cpp
index a4268b26f6c..996893fc8be 100644
--- a/src/mongo/s/commands/cluster_explain.cpp
+++ b/src/mongo/s/commands/cluster_explain.cpp
@@ -214,7 +214,7 @@ const char* ClusterExplain::getStageNameForReadOp(
}
// static
-void ClusterExplain::buildPlannerInfo(OperationContext* txn,
+void ClusterExplain::buildPlannerInfo(OperationContext* opCtx,
const vector<Strategy::CommandResult>& shardResults,
const char* mongosStageName,
BSONObjBuilder* out) {
@@ -233,8 +233,8 @@ void ClusterExplain::buildPlannerInfo(OperationContext* txn,
singleShardBob.append("shardName", shardResults[i].shardTargetId.toString());
{
- const auto shard =
- uassertStatusOK(grid.shardRegistry()->getShard(txn, shardResults[i].shardTargetId));
+ const auto shard = uassertStatusOK(
+ grid.shardRegistry()->getShard(opCtx, shardResults[i].shardTargetId));
singleShardBob.append("connectionString", shard->getConnString().toString());
}
appendIfRoom(&singleShardBob, serverInfo, "serverInfo");
@@ -356,7 +356,7 @@ void ClusterExplain::buildExecStats(const vector<Strategy::CommandResult>& shard
}
// static
-Status ClusterExplain::buildExplainResult(OperationContext* txn,
+Status ClusterExplain::buildExplainResult(OperationContext* opCtx,
const vector<Strategy::CommandResult>& shardResults,
const char* mongosStageName,
long long millisElapsed,
@@ -367,7 +367,7 @@ Status ClusterExplain::buildExplainResult(OperationContext* txn,
return validateStatus;
}
- buildPlannerInfo(txn, shardResults, mongosStageName, out);
+ buildPlannerInfo(opCtx, shardResults, mongosStageName, out);
buildExecStats(shardResults, mongosStageName, millisElapsed, out);
return Status::OK();
diff --git a/src/mongo/s/commands/cluster_explain.h b/src/mongo/s/commands/cluster_explain.h
index 600f176337f..38332b15748 100644
--- a/src/mongo/s/commands/cluster_explain.h
+++ b/src/mongo/s/commands/cluster_explain.h
@@ -91,7 +91,7 @@ public:
*
* On success, the output is added to the BSONObj builder 'out'.
*/
- static Status buildExplainResult(OperationContext* txn,
+ static Status buildExplainResult(OperationContext* opCtx,
const std::vector<Strategy::CommandResult>& shardResults,
const char* mongosStageName,
long long millisElapsed,
@@ -120,7 +120,7 @@ private:
* The planner info will display 'mongosStageName' as the name of the execution stage
* performed by mongos after gathering results from the shards.
*/
- static void buildPlannerInfo(OperationContext* txn,
+ static void buildPlannerInfo(OperationContext* opCtx,
const std::vector<Strategy::CommandResult>& shardResults,
const char* mongosStageName,
BSONObjBuilder* out);
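The intended call sequence for these two helpers is visible in cluster_find_and_modify_cmd.cpp further down in this patch: run the explained command on the targeted shard(s), collect the per-shard results as Strategy::CommandResult entries, then let buildExplainResult assemble the merged explain output. A rough usage sketch; names other than the ClusterExplain call are placeholders taken from that command, not additional patch content:

    // One CommandResult per contacted shard, filled in by running the command.
    std::vector<Strategy::CommandResult> shardResults;
    // ... dispatch the explained command and push each shard's result ...

    long long millisElapsed = timer.millis();  // elapsed time across the dispatch

    // Merge the shard outputs into 'out'; kSingleShard is the mongos stage name
    // used when exactly one shard was targeted.
    Status status = ClusterExplain::buildExplainResult(
        opCtx, shardResults, ClusterExplain::kSingleShard, millisElapsed, out);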
diff --git a/src/mongo/s/commands/cluster_explain_cmd.cpp b/src/mongo/s/commands/cluster_explain_cmd.cpp
index 3030589a080..4031a45c02a 100644
--- a/src/mongo/s/commands/cluster_explain_cmd.cpp
+++ b/src/mongo/s/commands/cluster_explain_cmd.cpp
@@ -87,7 +87,7 @@ public:
* the command that you are explaining. The auth check is performed recursively
* on the nested command.
*/
- virtual Status checkAuthForOperation(OperationContext* txn,
+ virtual Status checkAuthForOperation(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj) {
if (Object != cmdObj.firstElement().type()) {
@@ -103,10 +103,10 @@ public:
return Status(ErrorCodes::CommandNotFound, ss);
}
- return commToExplain->checkAuthForOperation(txn, dbname, explainObj);
+ return commToExplain->checkAuthForOperation(opCtx, dbname, explainObj);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbName,
BSONObj& cmdObj,
int options,
@@ -140,7 +140,7 @@ public:
// Actually call the nested command's explain(...) method.
Status explainStatus =
- commToExplain->explain(txn, dbName, explainObj, verbosity, metadata, &result);
+ commToExplain->explain(opCtx, dbName, explainObj, verbosity, metadata, &result);
if (!explainStatus.isOK()) {
return appendCommandStatus(result, explainStatus);
}
diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
index d41fa0a5707..578968205af 100644
--- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp
@@ -82,7 +82,7 @@ public:
find_and_modify::addPrivilegesRequiredForFindAndModify(this, dbname, cmdObj, out);
}
- virtual Status explain(OperationContext* txn,
+ virtual Status explain(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -90,20 +90,21 @@ public:
BSONObjBuilder* out) const {
const NamespaceString nss = parseNsCollectionRequired(dbName, cmdObj);
- auto scopedDB = uassertStatusOK(ScopedShardDatabase::getExisting(txn, dbName));
+ auto scopedDB = uassertStatusOK(ScopedShardDatabase::getExisting(opCtx, dbName));
const auto conf = scopedDB.db();
shared_ptr<ChunkManager> chunkMgr;
shared_ptr<Shard> shard;
if (!conf->isSharded(nss.ns())) {
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ auto shardStatus =
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, conf->getPrimaryId());
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
shard = shardStatus.getValue();
} else {
- chunkMgr = _getChunkManager(txn, conf, nss);
+ chunkMgr = _getChunkManager(opCtx, conf, nss);
const BSONObj query = cmdObj.getObjectField("query");
@@ -117,7 +118,7 @@ public:
return collationElementStatus;
}
- StatusWith<BSONObj> status = _getShardKey(txn, chunkMgr, query);
+ StatusWith<BSONObj> status = _getShardKey(opCtx, chunkMgr, query);
if (!status.isOK()) {
return status.getStatus();
}
@@ -125,7 +126,8 @@ public:
BSONObj shardKey = status.getValue();
auto chunk = chunkMgr->findIntersectingChunk(shardKey, collation);
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, chunk->getShardId());
+ auto shardStatus =
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, chunk->getShardId());
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
@@ -141,7 +143,7 @@ public:
Timer timer;
BSONObjBuilder result;
- bool ok = _runCommand(txn, conf, chunkMgr, shard->getId(), nss, explainCmd.obj(), result);
+ bool ok = _runCommand(opCtx, conf, chunkMgr, shard->getId(), nss, explainCmd.obj(), result);
long long millisElapsed = timer.millis();
if (!ok) {
@@ -159,10 +161,10 @@ public:
shardResults.push_back(cmdResult);
return ClusterExplain::buildExplainResult(
- txn, shardResults, ClusterExplain::kSingleShard, millisElapsed, out);
+ opCtx, shardResults, ClusterExplain::kSingleShard, millisElapsed, out);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbName,
BSONObj& cmdObj,
int options,
@@ -172,14 +174,14 @@ public:
// findAndModify should only be creating database if upsert is true, but this would require
// that the parsing be pulled into this function.
- auto scopedDb = uassertStatusOK(ScopedShardDatabase::getOrCreate(txn, dbName));
+ auto scopedDb = uassertStatusOK(ScopedShardDatabase::getOrCreate(opCtx, dbName));
const auto conf = scopedDb.db();
if (!conf->isSharded(nss.ns())) {
- return _runCommand(txn, conf, nullptr, conf->getPrimaryId(), nss, cmdObj, result);
+ return _runCommand(opCtx, conf, nullptr, conf->getPrimaryId(), nss, cmdObj, result);
}
- shared_ptr<ChunkManager> chunkMgr = _getChunkManager(txn, conf, nss);
+ shared_ptr<ChunkManager> chunkMgr = _getChunkManager(opCtx, conf, nss);
const BSONObj query = cmdObj.getObjectField("query");
@@ -193,7 +195,7 @@ public:
return appendCommandStatus(result, collationElementStatus);
}
- StatusWith<BSONObj> status = _getShardKey(txn, chunkMgr, query);
+ StatusWith<BSONObj> status = _getShardKey(opCtx, chunkMgr, query);
if (!status.isOK()) {
// Bad query
return appendCommandStatus(result, status.getStatus());
@@ -202,31 +204,32 @@ public:
BSONObj shardKey = status.getValue();
auto chunk = chunkMgr->findIntersectingChunk(shardKey, collation);
- const bool ok = _runCommand(txn, conf, chunkMgr, chunk->getShardId(), nss, cmdObj, result);
+ const bool ok =
+ _runCommand(opCtx, conf, chunkMgr, chunk->getShardId(), nss, cmdObj, result);
if (ok) {
updateChunkWriteStatsAndSplitIfNeeded(
- txn, chunkMgr.get(), chunk.get(), cmdObj.getObjectField("update").objsize());
+ opCtx, chunkMgr.get(), chunk.get(), cmdObj.getObjectField("update").objsize());
}
return ok;
}
private:
- shared_ptr<ChunkManager> _getChunkManager(OperationContext* txn,
+ shared_ptr<ChunkManager> _getChunkManager(OperationContext* opCtx,
DBConfig* conf,
const NamespaceString& nss) const {
- shared_ptr<ChunkManager> chunkMgr = conf->getChunkManager(txn, nss.ns());
+ shared_ptr<ChunkManager> chunkMgr = conf->getChunkManager(opCtx, nss.ns());
massert(13002, "shard internal error chunk manager should never be null", chunkMgr);
return chunkMgr;
}
- StatusWith<BSONObj> _getShardKey(OperationContext* txn,
+ StatusWith<BSONObj> _getShardKey(OperationContext* opCtx,
shared_ptr<ChunkManager> chunkMgr,
const BSONObj& query) const {
// Verify that the query has an equality predicate using the shard key
StatusWith<BSONObj> status =
- chunkMgr->getShardKeyPattern().extractShardKeyFromQuery(txn, query);
+ chunkMgr->getShardKeyPattern().extractShardKeyFromQuery(opCtx, query);
if (!status.isOK()) {
return status;
@@ -242,7 +245,7 @@ private:
return shardKey;
}
- bool _runCommand(OperationContext* txn,
+ bool _runCommand(OperationContext* opCtx,
DBConfig* conf,
shared_ptr<ChunkManager> chunkManager,
const ShardId& shardId,
@@ -251,7 +254,8 @@ private:
BSONObjBuilder& result) const {
BSONObj res;
- const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
ShardConnection conn(shard->getConnString(), nss.ns(), chunkManager);
bool ok = conn->runCommand(conf->name(), cmdObj, res);
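The targeting logic this command relies on is worth calling out, since the same opCtx threading appears at every step: extract the shard key from the query, find the owning chunk, then resolve that chunk's shard. A condensed sketch of the flow shown in the hunks above, with error handling abbreviated and the surrounding declarations assumed:

    // 1. Verify the query has an equality predicate on the shard key and extract it.
    StatusWith<BSONObj> swShardKey =
        chunkMgr->getShardKeyPattern().extractShardKeyFromQuery(opCtx, query);
    if (!swShardKey.isOK()) {
        return swShardKey.getStatus();  // bad query
    }

    // 2. Locate the chunk owning this shard key value (collation-aware lookup).
    auto chunk = chunkMgr->findIntersectingChunk(swShardKey.getValue(), collation);

    // 3. Resolve the shard that currently owns the chunk and dispatch to it.
    auto swShard = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, chunk->getShardId());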
diff --git a/src/mongo/s/commands/cluster_find_cmd.cpp b/src/mongo/s/commands/cluster_find_cmd.cpp
index 52edfe5e20e..d308c3a53fd 100644
--- a/src/mongo/s/commands/cluster_find_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_cmd.cpp
@@ -101,7 +101,7 @@ public:
return AuthorizationSession::get(client)->checkAuthForFind(nss, hasTerm);
}
- Status explain(OperationContext* txn,
+ Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -116,7 +116,7 @@ public:
}
auto result = Strategy::explainFind(
- txn, cmdObj, *qr.getValue(), verbosity, serverSelectionMetadata, out);
+ opCtx, cmdObj, *qr.getValue(), verbosity, serverSelectionMetadata, out);
if (result == ErrorCodes::CommandOnShardedViewNotSupportedOnMongod) {
auto resolvedView = ResolvedView::fromBSON(out->asTempObj());
@@ -136,8 +136,8 @@ public:
ClusterAggregate::Namespaces nsStruct;
nsStruct.requestedNss = std::move(nss);
nsStruct.executionNss = std::move(resolvedView.getNamespace());
- auto status =
- ClusterAggregate::runAggregate(txn, nsStruct, aggCmd.getValue(), queryOptions, out);
+ auto status = ClusterAggregate::runAggregate(
+ opCtx, nsStruct, aggCmd.getValue(), queryOptions, out);
appendCommandStatus(*out, status);
return status;
}
@@ -145,7 +145,7 @@ public:
return result;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -163,7 +163,7 @@ public:
}
auto cq =
- CanonicalQuery::canonicalize(txn, std::move(qr.getValue()), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(opCtx, std::move(qr.getValue()), ExtensionsCallbackNoop());
if (!cq.isOK()) {
return appendCommandStatus(result, cq.getStatus());
}
@@ -181,7 +181,7 @@ public:
std::vector<BSONObj> batch;
BSONObj viewDefinition;
auto cursorId = ClusterFind::runQuery(
- txn, *cq.getValue(), readPref.getValue(), &batch, &viewDefinition);
+ opCtx, *cq.getValue(), readPref.getValue(), &batch, &viewDefinition);
if (!cursorId.isOK()) {
if (cursorId.getStatus() == ErrorCodes::CommandOnShardedViewNotSupportedOnMongod) {
auto aggCmdOnView = cq.getValue()->getQueryRequest().asAggregationCommand();
@@ -203,7 +203,7 @@ public:
nsStruct.requestedNss = std::move(nss);
nsStruct.executionNss = std::move(resolvedView.getNamespace());
auto status = ClusterAggregate::runAggregate(
- txn, nsStruct, aggCmd.getValue(), options, &result);
+ opCtx, nsStruct, aggCmd.getValue(), options, &result);
appendCommandStatus(result, status);
return status.isOK();
}
diff --git a/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp b/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp
index 4f489ab28c1..35150ac3aca 100644
--- a/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp
+++ b/src/mongo/s/commands/cluster_flush_router_config_cmd.cpp
@@ -64,13 +64,13 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) {
- Grid::get(txn)->catalogCache()->invalidateAll();
+ Grid::get(opCtx)->catalogCache()->invalidateAll();
result.appendBool("flushed", true);
return true;
diff --git a/src/mongo/s/commands/cluster_fsync_cmd.cpp b/src/mongo/s/commands/cluster_fsync_cmd.cpp
index 6de6d08021f..a75c0b3629e 100644
--- a/src/mongo/s/commands/cluster_fsync_cmd.cpp
+++ b/src/mongo/s/commands/cluster_fsync_cmd.cpp
@@ -67,7 +67,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -87,14 +87,14 @@ public:
grid.shardRegistry()->getAllShardIds(&shardIds);
for (const ShardId& shardId : shardIds) {
- auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
+ auto shardStatus = grid.shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
continue;
}
const auto s = shardStatus.getValue();
auto response = uassertStatusOK(s->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
BSON("fsync" << 1),
diff --git a/src/mongo/s/commands/cluster_ftdc_commands.cpp b/src/mongo/s/commands/cluster_ftdc_commands.cpp
index 23903e92984..f8d55e352ef 100644
--- a/src/mongo/s/commands/cluster_ftdc_commands.cpp
+++ b/src/mongo/s/commands/cluster_ftdc_commands.cpp
@@ -70,7 +70,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
index 2b4e386b312..7a81082597f 100644
--- a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp
@@ -81,7 +81,7 @@ BSONObj buildGLECmdWithOpTime(const BSONObj& gleOptions,
* Returns OK with the LegacyWCResponses containing only write concern error information
* Returns !OK if there was an error getting a GLE response
*/
-Status enforceLegacyWriteConcern(OperationContext* txn,
+Status enforceLegacyWriteConcern(OperationContext* opCtx,
StringData dbName,
const BSONObj& options,
const HostOpTimeMap& hostOpTimes,
@@ -98,7 +98,7 @@ Status enforceLegacyWriteConcern(OperationContext* txn,
const repl::OpTime& opTime = hot.opTime;
const OID& electionId = hot.electionId;
- auto swShard = Grid::get(txn)->shardRegistry()->getShard(txn, shardConnStr.toString());
+ auto swShard = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardConnStr.toString());
if (!swShard.isOK()) {
return swShard.getStatus();
}
@@ -114,9 +114,12 @@ Status enforceLegacyWriteConcern(OperationContext* txn,
// Send the requests and wait to receive all the responses.
const ReadPreferenceSetting readPref(ReadPreference::PrimaryOnly, TagSet());
- AsyncRequestsSender ars(
- txn, Grid::get(txn)->getExecutorPool()->getArbitraryExecutor(), dbName, requests, readPref);
- auto responses = ars.waitForResponses(txn);
+ AsyncRequestsSender ars(opCtx,
+ Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(),
+ dbName,
+ requests,
+ readPref);
+ auto responses = ars.waitForResponses(opCtx);
// Parse the responses.
@@ -201,7 +204,7 @@ public:
// No auth required for getlasterror
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -239,7 +242,7 @@ public:
const HostOpTimeMap hostOpTimes(ClusterLastErrorInfo::get(cc()).getPrevHostOpTimes());
std::vector<LegacyWCResponse> wcResponses;
- auto status = enforceLegacyWriteConcern(txn, dbname, cmdObj, hostOpTimes, &wcResponses);
+ auto status = enforceLegacyWriteConcern(opCtx, dbname, cmdObj, hostOpTimes, &wcResponses);
// Don't forget about our last hosts, reset the client info
ClusterLastErrorInfo::get(cc()).disableForCommand();
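Note that this hunk is more than a rename: the AsyncRequestsSender construction is reflowed to one argument per line. The scatter/gather shape itself is unchanged and recurs elsewhere in mongos, so it is worth keeping in mind while reading the other command changes. A minimal sketch using only the calls visible above:

    // Fan the per-host GLE requests out through the arbitrary executor, then block
    // until every response (or error) has come back.
    AsyncRequestsSender ars(opCtx,
                            Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(),
                            dbName,
                            requests,
                            readPref);
    auto responses = ars.waitForResponses(opCtx);
    // 'responses' is then parsed into LegacyWCResponse entries by the surrounding code.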
diff --git a/src/mongo/s/commands/cluster_get_prev_error_cmd.cpp b/src/mongo/s/commands/cluster_get_prev_error_cmd.cpp
index 7eebe3bf5df..57ffc184cc4 100644
--- a/src/mongo/s/commands/cluster_get_prev_error_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_prev_error_cmd.cpp
@@ -61,7 +61,7 @@ public:
// No auth required
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_get_shard_map_cmd.cpp b/src/mongo/s/commands/cluster_get_shard_map_cmd.cpp
index 48bbdb2a9d4..5e6be95bebe 100644
--- a/src/mongo/s/commands/cluster_get_shard_map_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_shard_map_cmd.cpp
@@ -67,7 +67,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp b/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp
index 3ab79ef5364..00e104c1e45 100644
--- a/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp
+++ b/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp
@@ -78,7 +78,7 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -86,10 +86,10 @@ public:
BSONObjBuilder& result) override {
const NamespaceString nss(parseNs(dbname, cmdObj));
- auto scopedDb = uassertStatusOK(ScopedShardDatabase::getExisting(txn, nss.db()));
+ auto scopedDb = uassertStatusOK(ScopedShardDatabase::getExisting(opCtx, nss.db()));
auto config = scopedDb.db();
- auto cm = config->getChunkManagerIfExists(txn, nss.ns());
+ auto cm = config->getChunkManagerIfExists(opCtx, nss.ns());
uassert(ErrorCodes::NamespaceNotSharded, "ns [" + nss.ns() + " is not sharded.", cm);
for (const auto& cmEntry : cm->getChunkMap()) {
diff --git a/src/mongo/s/commands/cluster_getmore_cmd.cpp b/src/mongo/s/commands/cluster_getmore_cmd.cpp
index dff5c9c7e4a..e0af7c39663 100644
--- a/src/mongo/s/commands/cluster_getmore_cmd.cpp
+++ b/src/mongo/s/commands/cluster_getmore_cmd.cpp
@@ -91,7 +91,7 @@ public:
request.nss, request.cursorid, request.term.is_initialized());
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -106,7 +106,7 @@ public:
}
const GetMoreRequest& request = parseStatus.getValue();
- auto response = ClusterFind::runGetMore(txn, request);
+ auto response = ClusterFind::runGetMore(opCtx, request);
if (!response.isOK()) {
return appendCommandStatus(result, response.getStatus());
}
diff --git a/src/mongo/s/commands/cluster_index_filter_cmd.cpp b/src/mongo/s/commands/cluster_index_filter_cmd.cpp
index 52cdebea5d5..35612ecd08e 100644
--- a/src/mongo/s/commands/cluster_index_filter_cmd.cpp
+++ b/src/mongo/s/commands/cluster_index_filter_cmd.cpp
@@ -91,7 +91,7 @@ public:
}
// Cluster plan cache command entry point.
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -108,7 +108,7 @@ public:
vector<Strategy::CommandResult> results;
const BSONObj query;
Strategy::commandOp(
- txn, dbname, cmdObj, options, nss.ns(), query, CollationSpec::kSimpleSpec, &results);
+ opCtx, dbname, cmdObj, options, nss.ns(), query, CollationSpec::kSimpleSpec, &results);
// Set value of first shard result's "ok" field.
bool clusterCmdResult = true;
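Both this index filter command and the plan cache command further below broadcast through the same Strategy::commandOp entry point, which is why they receive identical one-line changes. The broadcast shape, restated as a sketch rather than additional patch content:

    // Dispatch 'cmdObj' to the shards owning 'nss' and collect one result per shard.
    std::vector<Strategy::CommandResult> results;
    const BSONObj query;  // empty query: target all chunks/shards for the namespace
    Strategy::commandOp(
        opCtx, dbname, cmdObj, options, nss.ns(), query, CollationSpec::kSimpleSpec, &results);

    // The first shard result's "ok" field then drives the overall command result.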
diff --git a/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp b/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp
index 5d97540a876..6c9705ff35c 100644
--- a/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp
+++ b/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp
@@ -52,7 +52,7 @@ public:
// No auth required
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_is_master_cmd.cpp b/src/mongo/s/commands/cluster_is_master_cmd.cpp
index 566660ea92d..761aca9bba1 100644
--- a/src/mongo/s/commands/cluster_is_master_cmd.cpp
+++ b/src/mongo/s/commands/cluster_is_master_cmd.cpp
@@ -67,14 +67,14 @@ public:
// No auth required
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) {
- auto& clientMetadataIsMasterState = ClientMetadataIsMasterState::get(txn->getClient());
+ auto& clientMetadataIsMasterState = ClientMetadataIsMasterState::get(opCtx->getClient());
bool seenIsMaster = clientMetadataIsMasterState.hasSeenIsMaster();
if (!seenIsMaster) {
clientMetadataIsMasterState.setSeenIsMaster();
@@ -97,10 +97,10 @@ public:
invariant(swParseClientMetadata.getValue());
- swParseClientMetadata.getValue().get().logClientMetadata(txn->getClient());
+ swParseClientMetadata.getValue().get().logClientMetadata(opCtx->getClient());
clientMetadataIsMasterState.setClientMetadata(
- txn->getClient(), std::move(swParseClientMetadata.getValue()));
+ opCtx->getClient(), std::move(swParseClientMetadata.getValue()));
}
result.appendBool("ismaster", true);
@@ -119,9 +119,9 @@ public:
"automationServiceDescriptor",
static_cast<ServerParameter*>(nullptr));
if (parameter)
- parameter->append(txn, result, "automationServiceDescriptor");
+ parameter->append(opCtx, result, "automationServiceDescriptor");
- MessageCompressorManager::forSession(txn->getClient()->session())
+ MessageCompressorManager::forSession(opCtx->getClient()->session())
.serverNegotiate(cmdObj, &result);
return true;
diff --git a/src/mongo/s/commands/cluster_kill_op.cpp b/src/mongo/s/commands/cluster_kill_op.cpp
index bfbc556b36b..d636d6922b3 100644
--- a/src/mongo/s/commands/cluster_kill_op.cpp
+++ b/src/mongo/s/commands/cluster_kill_op.cpp
@@ -76,7 +76,7 @@ public:
return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized");
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
@@ -103,7 +103,7 @@ public:
log() << "want to kill op: " << redact(opToKill);
// Will throw if shard id is not found
- auto shardStatus = grid.shardRegistry()->getShard(txn, shardIdent);
+ auto shardStatus = grid.shardRegistry()->getShard(opCtx, shardIdent);
if (!shardStatus.isOK()) {
return appendCommandStatus(result, shardStatus.getStatus());
}
diff --git a/src/mongo/s/commands/cluster_killcursors_cmd.cpp b/src/mongo/s/commands/cluster_killcursors_cmd.cpp
index 63235315d4c..e04dad6fc42 100644
--- a/src/mongo/s/commands/cluster_killcursors_cmd.cpp
+++ b/src/mongo/s/commands/cluster_killcursors_cmd.cpp
@@ -40,7 +40,9 @@ public:
ClusterKillCursorsCmd() = default;
private:
- Status _killCursor(OperationContext* txn, const NamespaceString& nss, CursorId cursorId) final {
+ Status _killCursor(OperationContext* opCtx,
+ const NamespaceString& nss,
+ CursorId cursorId) final {
return grid.getCursorManager()->killCursor(nss, cursorId);
}
} clusterKillCursorsCmd;
diff --git a/src/mongo/s/commands/cluster_list_databases_cmd.cpp b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
index 82d519b52bb..6b2d8b9400d 100644
--- a/src/mongo/s/commands/cluster_list_databases_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_databases_cmd.cpp
@@ -83,7 +83,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname_unused,
BSONObj& cmdObj,
int options,
@@ -98,14 +98,14 @@ public:
grid.shardRegistry()->getAllShardIds(&shardIds);
for (const ShardId& shardId : shardIds) {
- const auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
+ const auto shardStatus = grid.shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
continue;
}
const auto s = shardStatus.getValue();
auto response = uassertStatusOK(s->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
"admin",
cmdObj,
@@ -166,9 +166,9 @@ public:
}
// Get information for config and admin dbs from the config servers.
- auto catalogClient = grid.catalogClient(txn);
+ auto catalogClient = grid.catalogClient(opCtx);
auto appendStatus =
- catalogClient->appendInfoForConfigServerDatabases(txn, cmdObj, &dbListBuilder);
+ catalogClient->appendInfoForConfigServerDatabases(opCtx, cmdObj, &dbListBuilder);
if (!appendStatus.isOK()) {
return Command::appendCommandStatus(result, appendStatus);
}
diff --git a/src/mongo/s/commands/cluster_list_shards_cmd.cpp b/src/mongo/s/commands/cluster_list_shards_cmd.cpp
index 8d3b19ad8c8..af90c1bff04 100644
--- a/src/mongo/s/commands/cluster_list_shards_cmd.cpp
+++ b/src/mongo/s/commands/cluster_list_shards_cmd.cpp
@@ -68,14 +68,14 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
std::string& errmsg,
BSONObjBuilder& result) {
- auto shardsStatus = grid.catalogClient(txn)->getAllShards(
- txn, repl::ReadConcernLevel::kMajorityReadConcern);
+ auto shardsStatus = grid.catalogClient(opCtx)->getAllShards(
+ opCtx, repl::ReadConcernLevel::kMajorityReadConcern);
if (!shardsStatus.isOK()) {
return appendCommandStatus(result, shardsStatus.getStatus());
}
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index 6d4a4155365..088b8d6d4d1 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -182,7 +182,7 @@ public:
mr::addPrivilegesRequiredForMapReduce(this, dbname, cmdObj, out);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -232,7 +232,7 @@ public:
}
// Ensure the input database exists
- auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, dbname);
+ auto status = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbname);
if (!status.isOK()) {
return appendCommandStatus(result, status.getStatus());
}
@@ -242,7 +242,7 @@ public:
shared_ptr<DBConfig> confOut;
if (customOutDB) {
// Create the output database implicitly, since we have a custom output requested
- auto scopedDb = uassertStatusOK(ScopedShardDatabase::getOrCreate(txn, outDB));
+ auto scopedDb = uassertStatusOK(ScopedShardDatabase::getOrCreate(opCtx, outDB));
confOut = scopedDb.getSharedDbReference();
} else {
confOut = confIn;
@@ -274,14 +274,14 @@ public:
maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
if (maxChunkSizeBytes == 0) {
maxChunkSizeBytes =
- Grid::get(txn)->getBalancerConfiguration()->getMaxChunkSizeBytes();
+ Grid::get(opCtx)->getBalancerConfiguration()->getMaxChunkSizeBytes();
}
// maxChunkSizeBytes is sent as int BSON field
invariant(maxChunkSizeBytes < std::numeric_limits<int>::max());
}
- const auto shardRegistry = Grid::get(txn)->shardRegistry();
+ const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
// modify command to run on shards with output to tmp collection
string badShardedField;
@@ -292,7 +292,7 @@ public:
LOG(1) << "simple MR, just passthrough";
const auto shard =
- uassertStatusOK(shardRegistry->getShard(txn, confIn->getPrimaryId()));
+ uassertStatusOK(shardRegistry->getShard(opCtx, confIn->getPrimaryId()));
ShardConnection conn(shard->getConnString(), "");
@@ -338,7 +338,7 @@ public:
try {
Strategy::commandOp(
- txn, dbname, shardedCommand, 0, nss.ns(), q, collation, &mrCommandResults);
+ opCtx, dbname, shardedCommand, 0, nss.ns(), q, collation, &mrCommandResults);
} catch (DBException& e) {
e.addContext(str::stream() << "could not run map command on all shards for ns "
<< nss.ns()
@@ -352,7 +352,7 @@ public:
string server;
{
const auto shard =
- uassertStatusOK(shardRegistry->getShard(txn, mrResult.shardTargetId));
+ uassertStatusOK(shardRegistry->getShard(opCtx, mrResult.shardTargetId));
server = shard->getConnString().toString();
}
servers.insert(server);
@@ -413,7 +413,7 @@ public:
finalCmd.append("inputDB", dbname);
finalCmd.append("shardedOutputCollection", shardResultCollection);
finalCmd.append("shards", shardResultsB.done());
- finalCmd.append("writeConcern", txn->getWriteConcern().toBSON());
+ finalCmd.append("writeConcern", opCtx->getWriteConcern().toBSON());
BSONObj shardCounts = shardCountsB.done();
finalCmd.append("shardCounts", shardCounts);
@@ -446,7 +446,7 @@ public:
if (!shardedOutput) {
const auto shard =
- uassertStatusOK(shardRegistry->getShard(txn, confOut->getPrimaryId()));
+ uassertStatusOK(shardRegistry->getShard(opCtx, confOut->getPrimaryId()));
LOG(1) << "MR with single shard output, NS=" << outputCollNss.ns()
<< " primary=" << shard->toString();
@@ -472,20 +472,20 @@ public:
// Create the sharded collection if needed
if (!confOut->isSharded(outputCollNss.ns())) {
// Enable sharding on the output db
- Status status = Grid::get(txn)->catalogClient(txn)->enableSharding(
- txn, outputCollNss.db().toString());
+ Status status = Grid::get(opCtx)->catalogClient(opCtx)->enableSharding(
+ opCtx, outputCollNss.db().toString());
// If the database has sharding already enabled, we can ignore the error
if (status.isOK()) {
// Invalidate the output database so it gets reloaded on the next fetch attempt
- Grid::get(txn)->catalogCache()->invalidate(outputCollNss.db());
+ Grid::get(opCtx)->catalogCache()->invalidate(outputCollNss.db());
} else if (status != ErrorCodes::AlreadyInitialized) {
uassertStatusOK(status);
}
confOut.reset();
- confOut = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(
- txn, outputCollNss.db().toString()));
+ confOut = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(
+ opCtx, outputCollNss.db().toString()));
// Shard collection according to split points
vector<BSONObj> sortedSplitPts;
@@ -523,24 +523,24 @@ public:
BSONObj defaultCollation;
uassertStatusOK(
- Grid::get(txn)->catalogClient(txn)->shardCollection(txn,
- outputCollNss.ns(),
- sortKeyPattern,
- defaultCollation,
- true,
- sortedSplitPts,
- outShardIds));
+ Grid::get(opCtx)->catalogClient(opCtx)->shardCollection(opCtx,
+ outputCollNss.ns(),
+ sortKeyPattern,
+ defaultCollation,
+ true,
+ sortedSplitPts,
+ outShardIds));
// Make sure the cached metadata for the collection knows that we are now sharded
- confOut->getChunkManager(txn, outputCollNss.ns(), true /* reload */);
+ confOut->getChunkManager(opCtx, outputCollNss.ns(), true /* reload */);
}
auto chunkSizes = SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<int>();
{
// Take distributed lock to prevent split / migration.
auto scopedDistLock =
- Grid::get(txn)->catalogClient(txn)->getDistLockManager()->lock(
- txn, outputCollNss.ns(), "mr-post-process", kNoDistLockTimeout);
+ Grid::get(opCtx)->catalogClient(opCtx)->getDistLockManager()->lock(
+ opCtx, outputCollNss.ns(), "mr-post-process", kNoDistLockTimeout);
if (!scopedDistLock.isOK()) {
return appendCommandStatus(result, scopedDistLock.getStatus());
}
@@ -550,7 +550,7 @@ public:
try {
const BSONObj query;
- Strategy::commandOp(txn,
+ Strategy::commandOp(opCtx,
outDB,
finalCmdObj,
0,
@@ -570,8 +570,9 @@ public:
for (const auto& mrResult : mrCommandResults) {
string server;
{
- const auto shard = uassertStatusOK(
- Grid::get(txn)->shardRegistry()->getShard(txn, mrResult.shardTargetId));
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(
+ opCtx, mrResult.shardTargetId));
server = shard->getConnString().toString();
}
singleResult = mrResult.result;
@@ -609,7 +610,8 @@ public:
}
// Do the splitting round
- shared_ptr<ChunkManager> cm = confOut->getChunkManagerIfExists(txn, outputCollNss.ns());
+ shared_ptr<ChunkManager> cm =
+ confOut->getChunkManagerIfExists(opCtx, outputCollNss.ns());
uassert(34359,
str::stream() << "Failed to write mapreduce output to " << outputCollNss.ns()
<< "; expected that collection to be sharded, but it was not",
@@ -626,7 +628,7 @@ public:
warning() << "Mongod reported " << size << " bytes inserted for key " << key
<< " but can't find chunk";
} else {
- updateChunkWriteStatsAndSplitIfNeeded(txn, cm.get(), c.get(), size);
+ updateChunkWriteStatsAndSplitIfNeeded(opCtx, cm.get(), c.get(), size);
}
}
}
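The map/reduce post-processing above also shows the distributed-lock idiom that recurs throughout this patch: take the collection's distributed lock through the catalog client before any chunk-affecting work, and surface a failed acquisition as the command status. A compact sketch of that idiom, with the literals taken from the hunk above:

    // Prevent concurrent split/migration of the output collection while the
    // final reduce results are written.
    auto scopedDistLock = Grid::get(opCtx)->catalogClient(opCtx)->getDistLockManager()->lock(
        opCtx, outputCollNss.ns(), "mr-post-process", kNoDistLockTimeout);
    if (!scopedDistLock.isOK()) {
        return appendCommandStatus(result, scopedDistLock.getStatus());
    }
    // The lock is released when the scoped lock object goes out of scope.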
diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
index 2aaeeaeabb0..6b247823381 100644
--- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
+++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp
@@ -99,7 +99,7 @@ public:
static BSONField<string> configField;
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int,
@@ -107,7 +107,7 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNs(dbname, cmdObj));
- auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
+ auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(opCtx, nss));
vector<BSONObj> bounds;
if (!FieldParser::extract(cmdObj, boundsField, &bounds, &errmsg)) {
@@ -158,7 +158,7 @@ public:
remoteCmdObjB.append(cmdObj[ClusterMergeChunksCommand::boundsField()]);
remoteCmdObjB.append(
ClusterMergeChunksCommand::configField(),
- Grid::get(txn)->shardRegistry()->getConfigServerConnectionString().toString());
+ Grid::get(opCtx)->shardRegistry()->getConfigServerConnectionString().toString());
remoteCmdObjB.append(ClusterMergeChunksCommand::shardNameField(),
firstChunk->getShardId().toString());
@@ -167,7 +167,7 @@ public:
// Throws, but handled at level above. Don't want to rewrap to preserve exception
// formatting.
const auto shardStatus =
- Grid::get(txn)->shardRegistry()->getShard(txn, firstChunk->getShardId());
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, firstChunk->getShardId());
if (!shardStatus.isOK()) {
return appendCommandStatus(
result,
diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
index af94cb7396f..c3cb18ceb15 100644
--- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp
@@ -96,7 +96,7 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -106,7 +106,7 @@ public:
const NamespaceString nss(parseNs(dbname, cmdObj));
- auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
+ auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(opCtx, nss));
const auto toElt = cmdObj["to"];
uassert(ErrorCodes::TypeMismatch,
@@ -118,7 +118,7 @@ public:
return false;
}
- const auto toStatus = Grid::get(txn)->shardRegistry()->getShard(txn, toString);
+ const auto toStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, toString);
if (!toStatus.isOK()) {
string msg(str::stream() << "Could not move chunk in '" << nss.ns() << "' to shard '"
<< toString
@@ -132,7 +132,8 @@ public:
// so far, chunk size serves test purposes; it may or may not become a supported parameter
long long maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
if (maxChunkSizeBytes == 0) {
- maxChunkSizeBytes = Grid::get(txn)->getBalancerConfiguration()->getMaxChunkSizeBytes();
+ maxChunkSizeBytes =
+ Grid::get(opCtx)->getBalancerConfiguration()->getMaxChunkSizeBytes();
}
BSONObj find = cmdObj.getObjectField("find");
@@ -151,7 +152,7 @@ public:
if (!find.isEmpty()) {
// find
BSONObj shardKey =
- uassertStatusOK(cm->getShardKeyPattern().extractShardKeyFromQuery(txn, find));
+ uassertStatusOK(cm->getShardKeyPattern().extractShardKeyFromQuery(opCtx, find));
if (shardKey.isEmpty()) {
errmsg = str::stream() << "no shard key found in chunk query " << find;
return false;
@@ -191,7 +192,7 @@ public:
chunkType.setShard(chunk->getShardId());
chunkType.setVersion(cm->getVersion());
- uassertStatusOK(configsvr_client::moveChunk(txn,
+ uassertStatusOK(configsvr_client::moveChunk(opCtx,
chunkType,
to->getId(),
maxChunkSizeBytes,
@@ -200,7 +201,7 @@ public:
// Proactively refresh the chunk manager. Not strictly necessary, but this way it's
// immediately up-to-date the next time it's used.
- scopedCM.db()->getChunkManagerIfExists(txn, nss.ns(), true);
+ scopedCM.db()->getChunkManagerIfExists(opCtx, nss.ns(), true);
result.append("millis", t.millis());
return true;
diff --git a/src/mongo/s/commands/cluster_move_primary_cmd.cpp b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
index 652c204c60e..dc192f1a6a6 100644
--- a/src/mongo/s/commands/cluster_move_primary_cmd.cpp
+++ b/src/mongo/s/commands/cluster_move_primary_cmd.cpp
@@ -101,7 +101,7 @@ public:
return nsElt.str();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname_unused,
BSONObj& cmdObj,
int options,
@@ -120,14 +120,14 @@ public:
return false;
}
- auto const catalogClient = Grid::get(txn)->catalogClient(txn);
- auto const catalogCache = Grid::get(txn)->catalogCache();
- auto const shardRegistry = Grid::get(txn)->shardRegistry();
+ auto const catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+ auto const catalogCache = Grid::get(opCtx)->catalogCache();
+ auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
// Flush all cached information. This can't be perfect, but it's better than nothing.
catalogCache->invalidate(dbname);
- auto config = uassertStatusOK(catalogCache->getDatabase(txn, dbname));
+ auto config = uassertStatusOK(catalogCache->getDatabase(opCtx, dbname));
const auto toElt = cmdObj["to"];
uassert(ErrorCodes::TypeMismatch,
@@ -140,10 +140,10 @@ public:
}
const auto fromShard =
- uassertStatusOK(shardRegistry->getShard(txn, config->getPrimaryId()));
+ uassertStatusOK(shardRegistry->getShard(opCtx, config->getPrimaryId()));
const auto toShard = [&]() {
- auto toShardStatus = shardRegistry->getShard(txn, to);
+ auto toShardStatus = shardRegistry->getShard(opCtx, to);
if (!toShardStatus.isOK()) {
const std::string msg(
str::stream() << "Could not move database '" << dbname << "' to shard '" << to
@@ -165,13 +165,13 @@ public:
const std::string whyMessage(str::stream() << "Moving primary shard of " << dbname);
auto scopedDistLock = uassertStatusOK(catalogClient->getDistLockManager()->lock(
- txn, dbname + "-movePrimary", whyMessage, DistLockManager::kDefaultLockTimeout));
+ opCtx, dbname + "-movePrimary", whyMessage, DistLockManager::kDefaultLockTimeout));
- const auto shardedColls = getAllShardedCollectionsForDb(txn, dbname);
+ const auto shardedColls = getAllShardedCollectionsForDb(opCtx, dbname);
// Record start in changelog
catalogClient->logChange(
- txn,
+ opCtx,
"movePrimary.start",
dbname,
_buildMoveLogEntry(dbname, fromShard->toString(), toShard->toString(), shardedColls),
@@ -197,7 +197,7 @@ public:
<< bypassDocumentValidationCommandOption()
<< true
<< "writeConcern"
- << txn->getWriteConcern().toBSON()),
+ << opCtx->getWriteConcern().toBSON()),
cloneRes);
toconn.done();
@@ -215,10 +215,10 @@ public:
// Update the new primary in the config server metadata
{
- auto dbt = uassertStatusOK(catalogClient->getDatabase(txn, dbname)).value;
+ auto dbt = uassertStatusOK(catalogClient->getDatabase(opCtx, dbname)).value;
dbt.setPrimary(toShard->getId());
- uassertStatusOK(catalogClient->updateDatabase(txn, dbname, dbt));
+ uassertStatusOK(catalogClient->updateDatabase(opCtx, dbname, dbt));
}
// Ensure the next attempt to retrieve the database or any of its collections will do a full
@@ -236,7 +236,7 @@ public:
try {
BSONObj dropDBInfo;
- fromconn->dropDatabase(dbname.c_str(), txn->getWriteConcern(), &dropDBInfo);
+ fromconn->dropDatabase(dbname.c_str(), opCtx->getWriteConcern(), &dropDBInfo);
if (!hasWCError) {
if (auto wcErrorElem = dropDBInfo["writeConcernError"]) {
appendWriteConcernErrorToCmdResponse(
@@ -269,7 +269,7 @@ public:
<< oldPrimary;
BSONObj dropCollInfo;
fromconn->dropCollection(
- el.String(), txn->getWriteConcern(), &dropCollInfo);
+ el.String(), opCtx->getWriteConcern(), &dropCollInfo);
if (!hasWCError) {
if (auto wcErrorElem = dropCollInfo["writeConcernError"]) {
appendWriteConcernErrorToCmdResponse(
@@ -296,7 +296,7 @@ public:
// Record finish in changelog
catalogClient->logChange(
- txn,
+ opCtx,
"movePrimary",
dbname,
_buildMoveLogEntry(dbname, oldPrimary, toShard->toString(), shardedColls),
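movePrimary also illustrates the cache-refresh discipline several commands in this patch share: invalidate the routing cache for the database before reading it back through the catalog cache, so the command operates on fresh metadata. A two-line sketch of the pattern used above:

    // Drop any cached routing info for 'dbname', then re-read it; getDatabase
    // returns a StatusWith, so a missing database surfaces as a thrown status here.
    Grid::get(opCtx)->catalogCache()->invalidate(dbname);
    auto config = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbname));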
diff --git a/src/mongo/s/commands/cluster_netstat_cmd.cpp b/src/mongo/s/commands/cluster_netstat_cmd.cpp
index fd5a2b2e87d..0d50223c112 100644
--- a/src/mongo/s/commands/cluster_netstat_cmd.cpp
+++ b/src/mongo/s/commands/cluster_netstat_cmd.cpp
@@ -65,7 +65,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_pipeline_cmd.cpp b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
index 46ab48b39a2..e533ae05392 100644
--- a/src/mongo/s/commands/cluster_pipeline_cmd.cpp
+++ b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
@@ -72,7 +72,7 @@ public:
return AuthorizationSession::get(client)->checkAuthForAggregate(nss, cmdObj);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -83,7 +83,7 @@ public:
ClusterAggregate::Namespaces nsStruct;
nsStruct.requestedNss = nss;
nsStruct.executionNss = std::move(nss);
- auto status = ClusterAggregate::runAggregate(txn, nsStruct, cmdObj, options, &result);
+ auto status = ClusterAggregate::runAggregate(opCtx, nsStruct, cmdObj, options, &result);
appendCommandStatus(result, status);
return status.isOK();
}
diff --git a/src/mongo/s/commands/cluster_plan_cache_cmd.cpp b/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
index fea10e3de88..b29eff8b0b2 100644
--- a/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
+++ b/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
@@ -86,7 +86,7 @@ public:
}
// Cluster plan cache command entry point.
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -110,7 +110,7 @@ private:
// Cluster plan cache command implementation(s) below
//
-bool ClusterPlanCacheCmd::run(OperationContext* txn,
+bool ClusterPlanCacheCmd::run(OperationContext* opCtx,
const std::string& dbName,
BSONObj& cmdObj,
int options,
@@ -124,7 +124,7 @@ bool ClusterPlanCacheCmd::run(OperationContext* txn,
vector<Strategy::CommandResult> results;
const BSONObj query;
Strategy::commandOp(
- txn, dbName, cmdObj, options, nss.ns(), query, CollationSpec::kSimpleSpec, &results);
+ opCtx, dbName, cmdObj, options, nss.ns(), query, CollationSpec::kSimpleSpec, &results);
// Set value of first shard result's "ok" field.
bool clusterCmdResult = true;
diff --git a/src/mongo/s/commands/cluster_profile_cmd.cpp b/src/mongo/s/commands/cluster_profile_cmd.cpp
index dcdc63d4bb6..74d6bf57cde 100644
--- a/src/mongo/s/commands/cluster_profile_cmd.cpp
+++ b/src/mongo/s/commands/cluster_profile_cmd.cpp
@@ -58,7 +58,7 @@ public:
out->push_back(Privilege(ResourcePattern::forDatabaseName(dbname), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
index 6aeee9bd7e7..edf4ca92c57 100644
--- a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
+++ b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp
@@ -79,7 +79,7 @@ public:
out->push_back(Privilege(ResourcePattern::forClusterResource(), actions));
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -91,7 +91,7 @@ public:
cmdObj.firstElement().type() == BSONType::String);
const string target = cmdObj.firstElement().str();
- const auto shardStatus = grid.shardRegistry()->getShard(txn, ShardId(target));
+ const auto shardStatus = grid.shardRegistry()->getShard(opCtx, ShardId(target));
if (!shardStatus.isOK()) {
string msg(str::stream() << "Could not drop shard '" << target
<< "' because it does not exist");
@@ -100,15 +100,15 @@ public:
}
const auto s = shardStatus.getValue();
- auto catalogClient = grid.catalogClient(txn);
+ auto catalogClient = grid.catalogClient(opCtx);
StatusWith<ShardDrainingStatus> removeShardResult =
- catalogClient->removeShard(txn, s->getId());
+ catalogClient->removeShard(opCtx, s->getId());
if (!removeShardResult.isOK()) {
return appendCommandStatus(result, removeShardResult.getStatus());
}
vector<string> databases;
- Status status = catalogClient->getDatabasesForShard(txn, s->getId(), &databases);
+ Status status = catalogClient->getDatabasesForShard(opCtx, s->getId(), &databases);
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
@@ -142,7 +142,7 @@ public:
case ShardDrainingStatus::ONGOING: {
vector<ChunkType> chunks;
Status status =
- catalogClient->getChunks(txn,
+ catalogClient->getChunks(opCtx,
BSON(ChunkType::shard(s->getId().toString())),
BSONObj(),
boost::none, // return all
diff --git a/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp
index 028f79e32ae..18337e99d29 100644
--- a/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp
+++ b/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp
@@ -103,7 +103,7 @@ public:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -116,9 +116,9 @@ public:
parsedRequest.appendAsConfigCommand(&cmdBuilder);
cmdBuilder.append("writeConcern", kMajorityWriteConcern.toBSON());
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto cmdResponseStatus = uassertStatusOK(
- configShard->runCommandWithFixedRetryAttempts(txn,
+ configShard->runCommandWithFixedRetryAttempts(opCtx,
kPrimaryOnlyReadPreference,
"admin",
cmdBuilder.obj(),
diff --git a/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp b/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp
index 5c342b14489..e3474ed8393 100644
--- a/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp
+++ b/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp
@@ -64,7 +64,7 @@ public:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_reset_error_cmd.cpp b/src/mongo/s/commands/cluster_reset_error_cmd.cpp
index 899d51b658e..cad0ffe8740 100644
--- a/src/mongo/s/commands/cluster_reset_error_cmd.cpp
+++ b/src/mongo/s/commands/cluster_reset_error_cmd.cpp
@@ -59,7 +59,7 @@ public:
// No auth required
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp b/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp
index 54a6d9d9bb7..59b2d6ef071 100644
--- a/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp
+++ b/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp
@@ -83,7 +83,7 @@ public:
return Status::OK();
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -93,9 +93,9 @@ public:
FeatureCompatibilityVersionCommandParser::extractVersionFromCommand(getName(), cmdObj));
// Forward to config shard, which will forward to all shards.
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto response = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
dbname,
BSON("_configsvrSetFeatureCompatibilityVersion" << version),
diff --git a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
index 79f792e0c7b..7692e764e02 100644
--- a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
+++ b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp
@@ -116,7 +116,7 @@ BSONObj createIndexDoc(const std::string& ns,
/**
* Used only for writes to the config server, config and admin databases.
*/
-Status clusterCreateIndex(OperationContext* txn,
+Status clusterCreateIndex(OperationContext* opCtx,
const std::string& ns,
const BSONObj& keys,
const BSONObj& collation,
@@ -134,7 +134,7 @@ Status clusterCreateIndex(OperationContext* txn,
BatchedCommandResponse response;
ClusterWriter writer(false, 0);
- writer.write(txn, request, &response);
+ writer.write(opCtx, request, &response);
return response.toStatus();
}
@@ -177,7 +177,7 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -185,10 +185,10 @@ public:
BSONObjBuilder& result) override {
const NamespaceString nss(parseNs(dbname, cmdObj));
- auto const catalogClient = Grid::get(txn)->catalogClient(txn);
- auto const shardRegistry = Grid::get(txn)->shardRegistry();
+ auto const catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
+ auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
- auto scopedShardedDb = uassertStatusOK(ScopedShardDatabase::getExisting(txn, nss.db()));
+ auto scopedShardedDb = uassertStatusOK(ScopedShardDatabase::getExisting(opCtx, nss.db()));
const auto config = scopedShardedDb.db();
// Ensure sharding is allowed on the database
@@ -238,7 +238,7 @@ public:
bsonExtractTypedField(cmdObj, "collation", BSONType::Object, &collationElement);
if (collationStatus.isOK()) {
// Ensure that the collation is valid. Currently we only allow the simple collation.
- auto collator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto collator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(collationElement.Obj());
if (!collator.getStatus().isOK()) {
return appendCommandStatus(result, collator.getStatus());
@@ -281,7 +281,7 @@ public:
// The rest of the checks require a connection to the primary db
const ConnectionString shardConnString = [&]() {
const auto shard =
- uassertStatusOK(shardRegistry->getShard(txn, config->getPrimaryId()));
+ uassertStatusOK(shardRegistry->getShard(opCtx, config->getPrimaryId()));
return shard->getConnString();
}();
@@ -503,7 +503,7 @@ public:
BSONObj collationArg =
!defaultCollation.isEmpty() ? CollationSpec::kSimpleSpec : BSONObj();
Status status =
- clusterCreateIndex(txn, nss.ns(), proposedKey, collationArg, careAboutUnique);
+ clusterCreateIndex(opCtx, nss.ns(), proposedKey, collationArg, careAboutUnique);
if (!status.isOK()) {
errmsg = str::stream() << "ensureIndex failed to create index on "
<< "primary shard: " << status.reason();
@@ -582,7 +582,7 @@ public:
audit::logShardCollection(Client::getCurrent(), nss.ns(), proposedKey, careAboutUnique);
- uassertStatusOK(catalogClient->shardCollection(txn,
+ uassertStatusOK(catalogClient->shardCollection(opCtx,
nss.ns(),
proposedShardKey,
defaultCollation,
@@ -591,7 +591,7 @@ public:
std::set<ShardId>{}));
// Make sure the cached metadata for the collection knows that we are now sharded
- config->getChunkManager(txn, nss.ns(), true /* reload */);
+ config->getChunkManager(opCtx, nss.ns(), true /* reload */);
result << "collectionsharded" << nss.ns();
@@ -599,14 +599,14 @@ public:
if (isHashedShardKey && isEmpty) {
// Reload the new config info. If we created more than one initial chunk, then
// we need to move them around to balance.
- auto chunkManager = config->getChunkManager(txn, nss.ns(), true);
+ auto chunkManager = config->getChunkManager(opCtx, nss.ns(), true);
ChunkMap chunkMap = chunkManager->getChunkMap();
// 2. Move and commit each "big chunk" to a different shard.
int i = 0;
for (ChunkMap::const_iterator c = chunkMap.begin(); c != chunkMap.end(); ++c, ++i) {
const ShardId& shardId = shardIds[i % numShards];
- const auto toStatus = shardRegistry->getShard(txn, shardId);
+ const auto toStatus = shardRegistry->getShard(opCtx, shardId);
if (!toStatus.isOK()) {
continue;
}
@@ -627,10 +627,10 @@ public:
chunkType.setVersion(chunkManager->getVersion());
Status moveStatus = configsvr_client::moveChunk(
- txn,
+ opCtx,
chunkType,
to->getId(),
- Grid::get(txn)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
+ Grid::get(opCtx)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
MigrationSecondaryThrottleOptions::create(
MigrationSecondaryThrottleOptions::kOff),
true);
@@ -646,7 +646,7 @@ public:
}
// Reload the config info, after all the migrations
- chunkManager = config->getChunkManager(txn, nss.ns(), true);
+ chunkManager = config->getChunkManager(opCtx, nss.ns(), true);
// 3. Subdivide the big chunks by splitting at each of the points in "allSplits"
// that we haven't already split by.
@@ -658,7 +658,7 @@ public:
if (i == allSplits.size() || !currentChunk->containsKey(allSplits[i])) {
if (!subSplits.empty()) {
auto splitStatus = shardutil::splitChunkAtMultiplePoints(
- txn,
+ opCtx,
currentChunk->getShardId(),
nss,
chunkManager->getShardKeyPattern(),
@@ -692,7 +692,7 @@ public:
// Proactively refresh the chunk manager. Not really necessary, but this way it's
// immediately up-to-date the next time it's used.
- config->getChunkManager(txn, nss.ns(), true);
+ config->getChunkManager(opCtx, nss.ns(), true);
}
return true;
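The hashed-shard-key bootstrap above contains the one fully spelled-out configsvr_client::moveChunk call in this section of the patch, which makes it a handy reference for the argument order after the rename. Restated as a standalone sketch, with the arguments exactly as they appear above and the surrounding setup omitted:

    // Ask the config server to move 'chunkType' to the shard 'to', using the
    // balancer's configured max chunk size and no secondary throttling; the
    // trailing boolean is taken verbatim from the call above (presumably the
    // waitForDelete flag).
    Status moveStatus = configsvr_client::moveChunk(
        opCtx,
        chunkType,
        to->getId(),
        Grid::get(opCtx)->getBalancerConfiguration()->getMaxChunkSizeBytes(),
        MigrationSecondaryThrottleOptions::create(MigrationSecondaryThrottleOptions::kOff),
        true);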
diff --git a/src/mongo/s/commands/cluster_shutdown_cmd.cpp b/src/mongo/s/commands/cluster_shutdown_cmd.cpp
index 72f0fd71e6f..95db8c0c478 100644
--- a/src/mongo/s/commands/cluster_shutdown_cmd.cpp
+++ b/src/mongo/s/commands/cluster_shutdown_cmd.cpp
@@ -41,7 +41,7 @@ public:
<< "either (1) ran from localhost or (2) authenticated.";
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_split_cmd.cpp b/src/mongo/s/commands/cluster_split_cmd.cpp
index 57e5a54881b..b63da3b2ee7 100644
--- a/src/mongo/s/commands/cluster_split_cmd.cpp
+++ b/src/mongo/s/commands/cluster_split_cmd.cpp
@@ -55,7 +55,7 @@ namespace {
* Asks the mongod holding this chunk to find a key that approximately divides the specified chunk
* in two. Throws on error or if the chunk is empty.
*/
-BSONObj selectMedianKey(OperationContext* txn,
+BSONObj selectMedianKey(OperationContext* opCtx,
const ShardId& shardId,
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
@@ -66,10 +66,10 @@ BSONObj selectMedianKey(OperationContext* txn,
chunkRange.append(&cmd);
cmd.appendBool("force", true);
- auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ auto shard = uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
auto cmdResponse = uassertStatusOK(
- shard->runCommandWithFixedRetryAttempts(txn,
+ shard->runCommandWithFixedRetryAttempts(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
cmd.obj(),
@@ -126,7 +126,7 @@ public:
return parseNsFullyQualified(dbname, cmdObj);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -134,7 +134,7 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNs(dbname, cmdObj));
- auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
+ auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(opCtx, nss));
const BSONField<BSONObj> findField("find", BSONObj());
const BSONField<BSONArray> boundsField("bounds", BSONArray());
@@ -197,7 +197,7 @@ public:
if (!find.isEmpty()) {
// find
BSONObj shardKey =
- uassertStatusOK(cm->getShardKeyPattern().extractShardKeyFromQuery(txn, find));
+ uassertStatusOK(cm->getShardKeyPattern().extractShardKeyFromQuery(opCtx, find));
if (shardKey.isEmpty()) {
errmsg = stream() << "no shard key found in chunk query " << find;
return false;
@@ -255,7 +255,7 @@ public:
// middle of the chunk.
const BSONObj splitPoint = !middle.isEmpty()
? middle
- : selectMedianKey(txn,
+ : selectMedianKey(opCtx,
chunk->getShardId(),
nss,
cm->getShardKeyPattern(),
@@ -267,7 +267,7 @@ public:
<< redact(splitPoint);
uassertStatusOK(
- shardutil::splitChunkAtMultiplePoints(txn,
+ shardutil::splitChunkAtMultiplePoints(opCtx,
chunk->getShardId(),
nss,
cm->getShardKeyPattern(),
@@ -277,7 +277,7 @@ public:
// Proactively refresh the chunk manager. Not strictly necessary, but this way it's
// immediately up-to-date the next time it's used.
- scopedCM.db()->getChunkManagerIfExists(txn, nss.ns(), true);
+ scopedCM.db()->getChunkManagerIfExists(opCtx, nss.ns(), true);
return true;
}
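
selectMedianKey above issues a splitVector command with force: true against the shard that owns the chunk, and the shard answers with a key that approximately halves the chunk. As a rough standalone analogy (plain C++, not mongod's implementation, which derives the key from the shard-key index; all names here are illustrative), picking the median of a sample of shard-key values yields the same kind of split point:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Illustrative analogy only: choose the median of a sample of shard-key values
// as the split point. mongod's splitVector works against the shard-key index
// rather than an in-memory sample.
std::string selectMedianOfSample(std::vector<std::string> keys) {
    // nth_element partially sorts so that the middle element ends up in place.
    auto mid = keys.begin() + keys.size() / 2;
    std::nth_element(keys.begin(), mid, keys.end());
    return *mid;
}

int main() {
    const std::vector<std::string> sample = {"u01", "u42", "u07", "u33", "u19"};
    std::cout << "split point: " << selectMedianOfSample(sample) << "\n";  // prints u19
    return 0;
}
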
diff --git a/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp b/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp
index bd4c28dc8f8..f1616334ac0 100644
--- a/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp
+++ b/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp
@@ -118,7 +118,7 @@ public:
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
@@ -131,9 +131,9 @@ public:
parsedRequest.appendAsConfigCommand(&cmdBuilder);
cmdBuilder.append("writeConcern", kMajorityWriteConcern.toBSON());
- auto configShard = Grid::get(txn)->shardRegistry()->getConfigShard();
+ auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
auto cmdResponseStatus = uassertStatusOK(
- configShard->runCommandWithFixedRetryAttempts(txn,
+ configShard->runCommandWithFixedRetryAttempts(opCtx,
kPrimaryOnlyReadPreference,
"admin",
cmdBuilder.obj(),
diff --git a/src/mongo/s/commands/cluster_user_management_commands.cpp b/src/mongo/s/commands/cluster_user_management_commands.cpp
index 69aee76c4d3..18aa7736fe7 100644
--- a/src/mongo/s/commands/cluster_user_management_commands.cpp
+++ b/src/mongo/s/commands/cluster_user_management_commands.cpp
@@ -86,14 +86,14 @@ public:
return auth::checkAuthForCreateUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- return Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ return Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
}
virtual void redactForLogging(mutablebson::Document* cmdObj) {
@@ -125,7 +125,7 @@ public:
return auth::checkAuthForUpdateUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -136,8 +136,8 @@ public:
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -175,7 +175,7 @@ public:
return auth::checkAuthForDropUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -186,8 +186,8 @@ public:
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -221,14 +221,14 @@ public:
return auth::checkAuthForDropAllUsersFromDatabaseCommand(client, dbname);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -262,7 +262,7 @@ public:
return auth::checkAuthForGrantRolesToUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -275,8 +275,8 @@ public:
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -310,7 +310,7 @@ public:
return auth::checkAuthForRevokeRolesFromUserCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -323,8 +323,8 @@ public:
if (!status.isOK()) {
return appendCommandStatus(result, status);
}
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -362,14 +362,14 @@ public:
return auth::checkAuthForUsersInfoCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- return Grid::get(txn)->catalogClient(txn)->runUserManagementReadCommand(
- txn, dbname, cmdObj, &result);
+ return Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+ opCtx, dbname, cmdObj, &result);
}
} cmdUsersInfo;
@@ -397,14 +397,14 @@ public:
return auth::checkAuthForCreateRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- return Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ return Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
}
} cmdCreateRole;
@@ -432,14 +432,14 @@ public:
return auth::checkAuthForUpdateRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -473,14 +473,14 @@ public:
return auth::checkAuthForGrantPrivilegesToRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -514,14 +514,14 @@ public:
return auth::checkAuthForRevokePrivilegesFromRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -555,14 +555,14 @@ public:
return auth::checkAuthForGrantRolesToRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -596,14 +596,14 @@ public:
return auth::checkAuthForRevokeRolesFromRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -640,14 +640,14 @@ public:
return auth::checkAuthForDropRoleCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -685,14 +685,14 @@ public:
return auth::checkAuthForDropAllRolesFromDatabaseCommand(client, dbname);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- const bool ok = Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ const bool ok = Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
AuthorizationManager* authzManager = getGlobalAuthorizationManager();
invariant(authzManager);
@@ -730,14 +730,14 @@ public:
return auth::checkAuthForRolesInfoCommand(client, dbname, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- return Grid::get(txn)->catalogClient(txn)->runUserManagementReadCommand(
- txn, dbname, cmdObj, &result);
+ return Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementReadCommand(
+ opCtx, dbname, cmdObj, &result);
}
} cmdRolesInfo;
@@ -769,7 +769,7 @@ public:
return auth::checkAuthForInvalidateUserCacheCommand(client);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -820,14 +820,14 @@ public:
return auth::checkAuthForMergeAuthzCollectionsCommand(client, cmdObj);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
- return Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result);
+ return Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result);
}
} cmdMergeAuthzCollections;
@@ -838,7 +838,7 @@ public:
*
* Returned error indicates a failure.
*/
-Status runUpgradeOnAllShards(OperationContext* txn, int maxSteps, BSONObjBuilder& result) {
+Status runUpgradeOnAllShards(OperationContext* opCtx, int maxSteps, BSONObjBuilder& result) {
BSONObjBuilder cmdObjBuilder;
cmdObjBuilder.append("authSchemaUpgrade", 1);
cmdObjBuilder.append("maxSteps", maxSteps);
@@ -847,19 +847,19 @@ Status runUpgradeOnAllShards(OperationContext* txn, int maxSteps, BSONObjBuilder
const BSONObj cmdObj = cmdObjBuilder.done();
// Upgrade each shard in turn, stopping on first failure.
- auto shardRegistry = Grid::get(txn)->shardRegistry();
- shardRegistry->reload(txn);
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
+ shardRegistry->reload(opCtx);
vector<ShardId> shardIds;
shardRegistry->getAllShardIds(&shardIds);
bool hasWCError = false;
for (const auto& shardId : shardIds) {
- auto shardStatus = shardRegistry->getShard(txn, shardId);
+ auto shardStatus = shardRegistry->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
auto cmdResult = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
cmdObj,
@@ -910,15 +910,15 @@ public:
return auth::checkAuthForAuthSchemaUpgradeCommand(client);
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) {
// Run the authSchemaUpgrade command on the config servers
- if (!Grid::get(txn)->catalogClient(txn)->runUserManagementWriteCommand(
- txn, getName(), dbname, cmdObj, &result)) {
+ if (!Grid::get(opCtx)->catalogClient(opCtx)->runUserManagementWriteCommand(
+ opCtx, getName(), dbname, cmdObj, &result)) {
return false;
}
@@ -930,7 +930,7 @@ public:
// Optionally run the authSchemaUpgrade command on the individual shards
if (parsedArgs.shouldUpgradeShards) {
- status = runUpgradeOnAllShards(txn, parsedArgs.maxSteps, result);
+ status = runUpgradeOnAllShards(opCtx, parsedArgs.maxSteps, result);
if (!status.isOK()) {
// If the status is a write concern error, append a writeConcernError instead of
// an error message.
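
The loop in runUpgradeOnAllShards is a sequential fan-out: reload the shard registry, then run the command on each shard in turn and return the first error. A minimal standalone sketch of that control flow (generic C++; Status here is a stand-in for mongo::Status, and the callback stands in for Shard::runCommandWithFixedRetryAttempts):

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Minimal stand-in for mongo::Status, only what the sketch needs.
struct Status {
    bool ok;
    std::string reason;
};

// Run a per-shard action over every shard id, stopping at the first failure,
// mirroring the "upgrade each shard in turn" loop above.
Status runOnEachShardStopOnFailure(const std::vector<std::string>& shardIds,
                                   const std::function<Status(const std::string&)>& runCmd) {
    for (const auto& shardId : shardIds) {
        Status s = runCmd(shardId);
        if (!s.ok) {
            return s;  // first failure wins
        }
    }
    return {true, ""};
}

int main() {
    auto fakeRun = [](const std::string& shard) -> Status {
        return shard == "shard0002" ? Status{false, "authSchemaUpgrade failed on " + shard}
                                    : Status{true, ""};
    };
    Status s = runOnEachShardStopOnFailure({"shard0000", "shard0001", "shard0002"}, fakeRun);
    std::cout << (s.ok ? "all shards upgraded" : s.reason) << "\n";
    return 0;
}
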
diff --git a/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp b/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp
index 9a029d0f861..7f3e5e19c11 100644
--- a/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp
+++ b/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp
@@ -57,7 +57,7 @@ public:
// No auth required
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const std::string& dbname,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/cluster_write.cpp b/src/mongo/s/commands/cluster_write.cpp
index 3468ca6dcf4..730d5e8a178 100644
--- a/src/mongo/s/commands/cluster_write.cpp
+++ b/src/mongo/s/commands/cluster_write.cpp
@@ -66,9 +66,9 @@ void toBatchError(const Status& status, BatchedCommandResponse* response) {
dassert(response->isValid(NULL));
}
-void reloadChunkManager(OperationContext* txn, const NamespaceString& nss) {
- auto config = uassertStatusOK(ScopedShardDatabase::getExisting(txn, nss.db()));
- config.db()->getChunkManagerIfExists(txn, nss.ns(), true);
+void reloadChunkManager(OperationContext* opCtx, const NamespaceString& nss) {
+ auto config = uassertStatusOK(ScopedShardDatabase::getExisting(opCtx, nss.db()));
+ config.db()->getChunkManagerIfExists(opCtx, nss.ns(), true);
}
/**
@@ -104,7 +104,7 @@ uint64_t calculateDesiredChunkSize(uint64_t maxChunkSizeBytes, uint64_t numChunk
* ordered list of ascending/descending field names. For example {a : 1, b : -1} is not special, but
* {a : "hashed"} is.
*/
-BSONObj findExtremeKeyForShard(OperationContext* txn,
+BSONObj findExtremeKeyForShard(OperationContext* opCtx,
const NamespaceString& nss,
const ShardId& shardId,
const ShardKeyPattern& shardKeyPattern,
@@ -130,7 +130,8 @@ BSONObj findExtremeKeyForShard(OperationContext* txn,
// Find the extreme key
const auto shardConnStr = [&]() {
- const auto shard = uassertStatusOK(Grid::get(txn)->shardRegistry()->getShard(txn, shardId));
+ const auto shard =
+ uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId));
return shard->getConnString();
}();
@@ -172,8 +173,10 @@ BSONObj findExtremeKeyForShard(OperationContext* txn,
/**
* Splits the chunks that were touched, based on the targeter stats, if needed.
*/
-void splitIfNeeded(OperationContext* txn, const NamespaceString& nss, const TargeterStats& stats) {
- auto scopedCMStatus = ScopedChunkManager::get(txn, nss);
+void splitIfNeeded(OperationContext* opCtx,
+ const NamespaceString& nss,
+ const TargeterStats& stats) {
+ auto scopedCMStatus = ScopedChunkManager::get(opCtx, nss);
if (!scopedCMStatus.isOK()) {
warning() << "failed to get collection information for " << nss
<< " while checking for auto-split" << causedBy(scopedCMStatus.getStatus());
@@ -196,7 +199,7 @@ void splitIfNeeded(OperationContext* txn, const NamespaceString& nss, const Targ
return;
}
- updateChunkWriteStatsAndSplitIfNeeded(txn, scopedCM.cm().get(), chunk.get(), it->second);
+ updateChunkWriteStatsAndSplitIfNeeded(opCtx, scopedCM.cm().get(), chunk.get(), it->second);
}
}
@@ -205,7 +208,7 @@ void splitIfNeeded(OperationContext* txn, const NamespaceString& nss, const Targ
ClusterWriter::ClusterWriter(bool autoSplit, int timeoutMillis)
: _autoSplit(autoSplit), _timeoutMillis(timeoutMillis) {}
-void ClusterWriter::write(OperationContext* txn,
+void ClusterWriter::write(OperationContext* opCtx,
const BatchedCommandRequest& origRequest,
BatchedCommandResponse* response) {
// Add _ids to insert request if req'd
@@ -291,14 +294,14 @@ void ClusterWriter::write(OperationContext* txn,
request = requestWithWriteConcern.get();
}
- Grid::get(txn)->catalogClient(txn)->writeConfigServerDirect(txn, *request, response);
+ Grid::get(opCtx)->catalogClient(opCtx)->writeConfigServerDirect(opCtx, *request, response);
} else {
TargeterStats targeterStats;
{
ChunkManagerTargeter targeter(request->getTargetingNSS(), &targeterStats);
- Status targetInitStatus = targeter.init(txn);
+ Status targetInitStatus = targeter.init(opCtx);
if (!targetInitStatus.isOK()) {
toBatchError(Status(targetInitStatus.code(),
str::stream()
@@ -313,11 +316,11 @@ void ClusterWriter::write(OperationContext* txn,
DBClientMultiCommand dispatcher;
BatchWriteExec exec(&targeter, &dispatcher);
- exec.executeBatch(txn, *request, response, &_stats);
+ exec.executeBatch(opCtx, *request, response, &_stats);
}
if (_autoSplit) {
- splitIfNeeded(txn, request->getNS(), targeterStats);
+ splitIfNeeded(opCtx, request->getNS(), targeterStats);
}
}
}
@@ -326,7 +329,7 @@ const BatchWriteExecStats& ClusterWriter::getStats() {
return _stats;
}
-void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
+void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* opCtx,
ChunkManager* manager,
Chunk* chunk,
long dataWritten) {
@@ -334,7 +337,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
// bubbled up on the client connection doing a write.
LastError::Disabled d(&LastError::get(cc()));
- const auto balancerConfig = Grid::get(txn)->getBalancerConfiguration();
+ const auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration();
const bool minIsInf =
(0 == manager->getShardKeyPattern().getKeyPattern().globalMin().woCompare(chunk->getMin()));
@@ -370,7 +373,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
try {
// Ensure we have the most up-to-date balancer configuration
- uassertStatusOK(balancerConfig->refreshAndCheck(txn));
+ uassertStatusOK(balancerConfig->refreshAndCheck(opCtx));
if (!balancerConfig->getShouldAutoSplit()) {
return;
@@ -393,7 +396,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
}();
auto splitPoints =
- uassertStatusOK(shardutil::selectChunkSplitPoints(txn,
+ uassertStatusOK(shardutil::selectChunkSplitPoints(opCtx,
chunk->getShardId(),
nss,
manager->getShardKeyPattern(),
@@ -425,13 +428,13 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
if (KeyPattern::isOrderedKeyPattern(manager->getShardKeyPattern().toBSON())) {
if (minIsInf) {
BSONObj key = findExtremeKeyForShard(
- txn, nss, chunk->getShardId(), manager->getShardKeyPattern(), true);
+ opCtx, nss, chunk->getShardId(), manager->getShardKeyPattern(), true);
if (!key.isEmpty()) {
splitPoints.front() = key.getOwned();
}
} else if (maxIsInf) {
BSONObj key = findExtremeKeyForShard(
- txn, nss, chunk->getShardId(), manager->getShardKeyPattern(), false);
+ opCtx, nss, chunk->getShardId(), manager->getShardKeyPattern(), false);
if (!key.isEmpty()) {
splitPoints.back() = key.getOwned();
}
@@ -439,7 +442,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
}
const auto suggestedMigrateChunk =
- uassertStatusOK(shardutil::splitChunkAtMultiplePoints(txn,
+ uassertStatusOK(shardutil::splitChunkAtMultiplePoints(opCtx,
chunk->getShardId(),
nss,
manager->getShardKeyPattern(),
@@ -454,7 +457,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
return false;
auto collStatus =
- Grid::get(txn)->catalogClient(txn)->getCollection(txn, manager->getns());
+ Grid::get(opCtx)->catalogClient(opCtx)->getCollection(opCtx, manager->getns());
if (!collStatus.isOK()) {
log() << "Auto-split for " << nss << " failed to load collection metadata"
<< causedBy(redact(collStatus.getStatus()));
@@ -470,7 +473,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
(shouldBalance ? ")" : ", but no migrations allowed)"));
if (!shouldBalance || !suggestedMigrateChunk) {
- reloadChunkManager(txn, nss);
+ reloadChunkManager(opCtx, nss);
return;
}
@@ -482,7 +485,7 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
// We need to use the latest chunk manager (after the split) in order to have the most
// up-to-date view of the chunk we are about to move
- auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(txn, nss));
+ auto scopedCM = uassertStatusOK(ScopedChunkManager::refreshAndGet(opCtx, nss));
auto suggestedChunk = scopedCM.cm()->findIntersectingChunkWithSimpleCollation(
suggestedMigrateChunk->getMin());
@@ -493,9 +496,9 @@ void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
chunkToMove.setMax(suggestedChunk->getMax());
chunkToMove.setVersion(suggestedChunk->getLastmod());
- uassertStatusOK(configsvr_client::rebalanceChunk(txn, chunkToMove));
+ uassertStatusOK(configsvr_client::rebalanceChunk(opCtx, chunkToMove));
- reloadChunkManager(txn, nss);
+ reloadChunkManager(opCtx, nss);
} catch (const DBException& ex) {
chunk->randomizeBytesWritten();
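
One detail of the auto-split path above: when the chunk being split borders the global MinKey or MaxKey (minIsInf / maxIsInf), the first or last computed split point is replaced with the lowest or highest key actually present on the shard (findExtremeKeyForShard), so the resulting edge chunk stays small enough to migrate cheaply. A simplified standalone sketch of just that adjustment (plain C++; keys shown as strings for brevity, whereas the real code operates on BSONObj keys):

#include <iostream>
#include <string>
#include <vector>

// Simplified sketch: replace the first/last split point with the actual extreme
// key when the chunk touches the global min/max of the key space, as done in
// updateChunkWriteStatsAndSplitIfNeeded above.
void adjustExtremeSplitPoints(std::vector<std::string>& splitPoints,
                              bool minIsInf,
                              bool maxIsInf,
                              const std::string& lowestKeyOnShard,
                              const std::string& highestKeyOnShard) {
    if (splitPoints.empty())
        return;
    if (minIsInf && !lowestKeyOnShard.empty()) {
        splitPoints.front() = lowestKeyOnShard;
    } else if (maxIsInf && !highestKeyOnShard.empty()) {
        splitPoints.back() = highestKeyOnShard;
    }
}

int main() {
    std::vector<std::string> points = {"k10", "k20", "k30"};
    adjustExtremeSplitPoints(points, /*minIsInf=*/true, /*maxIsInf=*/false, "k02", "");
    for (const auto& p : points) {
        std::cout << p << " ";  // prints: k02 k20 k30
    }
    std::cout << "\n";
    return 0;
}
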
diff --git a/src/mongo/s/commands/cluster_write.h b/src/mongo/s/commands/cluster_write.h
index 80f4a325ddf..cf41b20bcb5 100644
--- a/src/mongo/s/commands/cluster_write.h
+++ b/src/mongo/s/commands/cluster_write.h
@@ -43,7 +43,7 @@ class ClusterWriter {
public:
ClusterWriter(bool autoSplit, int timeoutMillis);
- void write(OperationContext* txn,
+ void write(OperationContext* opCtx,
const BatchedCommandRequest& request,
BatchedCommandResponse* response);
@@ -61,7 +61,7 @@ private:
* max size of a shard attempt to split the chunk. This call is opportunistic and swallows any
* errors.
*/
-void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* txn,
+void updateChunkWriteStatsAndSplitIfNeeded(OperationContext* opCtx,
ChunkManager* manager,
Chunk* chunk,
long dataWritten);
diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp
index 270510163d5..d8a05b779b6 100644
--- a/src/mongo/s/commands/cluster_write_cmd.cpp
+++ b/src/mongo/s/commands/cluster_write_cmd.cpp
@@ -92,7 +92,7 @@ public:
return status;
}
- virtual Status explain(OperationContext* txn,
+ virtual Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -120,16 +120,16 @@ public:
BatchItemRef targetingBatchItem(&request, 0);
vector<Strategy::CommandResult> shardResults;
Status status =
- _commandOpWrite(txn, dbname, explainCmdBob.obj(), targetingBatchItem, &shardResults);
+ _commandOpWrite(opCtx, dbname, explainCmdBob.obj(), targetingBatchItem, &shardResults);
if (!status.isOK()) {
return status;
}
return ClusterExplain::buildExplainResult(
- txn, shardResults, ClusterExplain::kWriteOnShards, timer.millis(), out);
+ opCtx, shardResults, ClusterExplain::kWriteOnShards, timer.millis(), out);
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbname,
BSONObj& cmdObj,
int options,
@@ -152,7 +152,7 @@ public:
response.setErrCode(ErrorCodes::FailedToParse);
response.setErrMessage(errmsg);
} else {
- writer.write(txn, request, &response);
+ writer.write(opCtx, request, &response);
}
dassert(response.isValid(NULL));
@@ -220,7 +220,7 @@ private:
*
* Does *not* retry or retarget if the metadata is stale.
*/
- static Status _commandOpWrite(OperationContext* txn,
+ static Status _commandOpWrite(OperationContext* opCtx,
const std::string& dbName,
const BSONObj& command,
BatchItemRef targetingBatchItem,
@@ -230,7 +230,7 @@ private:
TargeterStats stats;
ChunkManagerTargeter targeter(
NamespaceString(targetingBatchItem.getRequest()->getTargetingNS()), &stats);
- Status status = targeter.init(txn);
+ Status status = targeter.init(opCtx);
if (!status.isOK())
return status;
@@ -239,22 +239,25 @@ private:
if (targetingBatchItem.getOpType() == BatchedCommandRequest::BatchType_Insert) {
ShardEndpoint* endpoint;
- Status status = targeter.targetInsert(txn, targetingBatchItem.getDocument(), &endpoint);
+ Status status =
+ targeter.targetInsert(opCtx, targetingBatchItem.getDocument(), &endpoint);
if (!status.isOK())
return status;
endpoints.push_back(endpoint);
} else if (targetingBatchItem.getOpType() == BatchedCommandRequest::BatchType_Update) {
- Status status = targeter.targetUpdate(txn, *targetingBatchItem.getUpdate(), &endpoints);
+ Status status =
+ targeter.targetUpdate(opCtx, *targetingBatchItem.getUpdate(), &endpoints);
if (!status.isOK())
return status;
} else {
invariant(targetingBatchItem.getOpType() == BatchedCommandRequest::BatchType_Delete);
- Status status = targeter.targetDelete(txn, *targetingBatchItem.getDelete(), &endpoints);
+ Status status =
+ targeter.targetDelete(opCtx, *targetingBatchItem.getDelete(), &endpoints);
if (!status.isOK())
return status;
}
- auto shardRegistry = Grid::get(txn)->shardRegistry();
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
// Assemble requests
std::vector<AsyncRequestsSender::Request> requests;
@@ -262,7 +265,7 @@ private:
++it) {
const ShardEndpoint* endpoint = *it;
- auto shardStatus = shardRegistry->getShard(txn, endpoint->shardName);
+ auto shardStatus = shardRegistry->getShard(opCtx, endpoint->shardName);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
@@ -272,12 +275,12 @@ private:
// Send the requests and wait to receive all the responses.
const ReadPreferenceSetting readPref(ReadPreference::PrimaryOnly, TagSet());
- AsyncRequestsSender ars(txn,
- Grid::get(txn)->getExecutorPool()->getArbitraryExecutor(),
+ AsyncRequestsSender ars(opCtx,
+ Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(),
dbName,
requests,
readPref);
- auto responses = ars.waitForResponses(txn);
+ auto responses = ars.waitForResponses(opCtx);
// Parse the responses.
@@ -294,7 +297,7 @@ private:
invariant(response.shardHostAndPort);
result.target = ConnectionString(std::move(*response.shardHostAndPort));
- auto shardStatus = shardRegistry->getShard(txn, result.target.toString());
+ auto shardStatus = shardRegistry->getShard(opCtx, result.target.toString());
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index e5324266d35..7d14499e9d8 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -85,13 +85,14 @@ using std::vector;
namespace {
-bool cursorCommandPassthrough(OperationContext* txn,
+bool cursorCommandPassthrough(OperationContext* opCtx,
shared_ptr<DBConfig> conf,
const BSONObj& cmdObj,
const NamespaceString& nss,
int options,
BSONObjBuilder* out) {
- const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ const auto shardStatus =
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, conf->getPrimaryId());
if (!shardStatus.isOK()) {
invariant(shardStatus.getStatus() == ErrorCodes::ShardNotFound);
return Command::appendCommandStatus(*out, shardStatus.getStatus());
@@ -119,12 +120,12 @@ bool cursorCommandPassthrough(OperationContext* txn,
}
StatusWith<BSONObj> transformedResponse =
- storePossibleCursor(txn,
+ storePossibleCursor(opCtx,
HostAndPort(cursor->originalHost()),
response,
nss,
- Grid::get(txn)->getExecutorPool()->getArbitraryExecutor(),
- Grid::get(txn)->getCursorManager());
+ Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(),
+ Grid::get(opCtx)->getCursorManager());
if (!transformedResponse.isOK()) {
return Command::appendCommandStatus(*out, transformedResponse.getStatus());
}
@@ -172,37 +173,37 @@ public:
// all grid commands are designed not to lock
protected:
- bool passthrough(OperationContext* txn,
+ bool passthrough(OperationContext* opCtx,
DBConfig* conf,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- return _passthrough(txn, conf->name(), conf, cmdObj, 0, result);
+ return _passthrough(opCtx, conf->name(), conf, cmdObj, 0, result);
}
- bool adminPassthrough(OperationContext* txn,
+ bool adminPassthrough(OperationContext* opCtx,
DBConfig* conf,
const BSONObj& cmdObj,
BSONObjBuilder& result) {
- return _passthrough(txn, "admin", conf, cmdObj, 0, result);
+ return _passthrough(opCtx, "admin", conf, cmdObj, 0, result);
}
- bool passthrough(OperationContext* txn,
+ bool passthrough(OperationContext* opCtx,
DBConfig* conf,
const BSONObj& cmdObj,
int options,
BSONObjBuilder& result) {
- return _passthrough(txn, conf->name(), conf, cmdObj, options, result);
+ return _passthrough(opCtx, conf->name(), conf, cmdObj, options, result);
}
private:
- bool _passthrough(OperationContext* txn,
+ bool _passthrough(OperationContext* opCtx,
const string& db,
DBConfig* conf,
const BSONObj& cmdObj,
int options,
BSONObjBuilder& result) {
const auto shardStatus =
- Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, conf->getPrimaryId());
const auto shard = uassertStatusOK(shardStatus);
ShardConnection conn(shard->getConnString(), "");
@@ -229,13 +230,13 @@ public:
bool implicitCreateDb = false)
: RunOnAllShardsCommand(n, oldname, useShardConn, implicitCreateDb) {}
- virtual void getShardIds(OperationContext* txn,
+ virtual void getShardIds(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
vector<ShardId>& shardIds) {
const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
- auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, dbName);
+ auto status = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
uassertStatusOK(status.getStatus());
shared_ptr<DBConfig> conf = status.getValue();
@@ -243,7 +244,7 @@ public:
if (!conf->isSharded(nss.ns())) {
shardIds.push_back(conf->getPrimaryId());
} else {
- Grid::get(txn)->shardRegistry()->getAllShardIds(&shardIds);
+ Grid::get(opCtx)->shardRegistry()->getAllShardIds(&shardIds);
}
}
};
@@ -252,7 +253,7 @@ class NotAllowedOnShardedCollectionCmd : public PublicGridCommand {
public:
NotAllowedOnShardedCollectionCmd(const char* n) : PublicGridCommand(n) {}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
@@ -260,9 +261,9 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNs(dbName, cmdObj));
- auto conf = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, dbName));
+ auto conf = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName));
if (!conf->isSharded(nss.ns())) {
- return passthrough(txn, conf.get(), cmdObj, options, result);
+ return passthrough(opCtx, conf.get(), cmdObj, options, result);
}
return appendCommandStatus(
@@ -451,7 +452,7 @@ public:
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
@@ -459,18 +460,24 @@ public:
BSONObjBuilder& output) {
const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
- auto conf = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, dbName));
+ auto conf = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName));
if (!conf->isSharded(nss.ns())) {
- return passthrough(txn, conf.get(), cmdObj, output);
+ return passthrough(opCtx, conf.get(), cmdObj, output);
}
- shared_ptr<ChunkManager> cm = conf->getChunkManager(txn, nss.ns());
+ shared_ptr<ChunkManager> cm = conf->getChunkManager(opCtx, nss.ns());
massert(40051, "chunk manager should not be null", cm);
vector<Strategy::CommandResult> results;
const BSONObj query;
- Strategy::commandOp(
- txn, dbName, cmdObj, options, cm->getns(), query, CollationSpec::kSimpleSpec, &results);
+ Strategy::commandOp(opCtx,
+ dbName,
+ cmdObj,
+ options,
+ cm->getns(),
+ query,
+ CollationSpec::kSimpleSpec,
+ &results);
BSONObjBuilder rawResBuilder(output.subobjStart("raw"));
bool isValid = true;
@@ -519,19 +526,19 @@ public:
virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
return true;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int,
string& errmsg,
BSONObjBuilder& result) {
- auto dbStatus = ScopedShardDatabase::getOrCreate(txn, dbName);
+ auto dbStatus = ScopedShardDatabase::getOrCreate(opCtx, dbName);
if (!dbStatus.isOK()) {
return appendCommandStatus(result, dbStatus.getStatus());
}
auto scopedDb = std::move(dbStatus.getValue());
- return passthrough(txn, scopedDb.db(), cmdObj, result);
+ return passthrough(opCtx, scopedDb.db(), cmdObj, result);
}
} createCmd;
@@ -550,7 +557,7 @@ public:
virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
return true;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int,
@@ -567,7 +574,7 @@ public:
const string dbNameFrom = fullnsFrom.db().toString();
auto confFrom =
- uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, dbNameFrom));
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbNameFrom));
const auto fullnsToElt = cmdObj["to"];
uassert(ErrorCodes::InvalidNamespace,
@@ -578,7 +585,8 @@ public:
str::stream() << "Invalid target namespace: " << fullnsTo.ns(),
fullnsTo.isValid());
const string dbNameTo = fullnsTo.db().toString();
- auto confTo = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, dbNameTo));
+ auto confTo =
+ uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbNameTo));
uassert(
13138, "You can't rename a sharded collection", !confFrom->isSharded(fullnsFrom.ns()));
@@ -592,7 +600,7 @@ public:
"Source and destination collections must be on same shard",
shardFrom == shardTo);
- return adminPassthrough(txn, confFrom.get(), cmdObj, result);
+ return adminPassthrough(opCtx, confFrom.get(), cmdObj, result);
}
} renameCollectionCmd;
@@ -614,7 +622,7 @@ public:
return true;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
@@ -629,14 +637,14 @@ public:
"Invalid todb argument",
NamespaceString::validDBName(todb, NamespaceString::DollarInDbNameBehavior::Allow));
- auto scopedToDb = uassertStatusOK(ScopedShardDatabase::getOrCreate(txn, todb));
+ auto scopedToDb = uassertStatusOK(ScopedShardDatabase::getOrCreate(opCtx, todb));
uassert(ErrorCodes::IllegalOperation,
"Cannot copy to a sharded database",
!scopedToDb.db()->isShardingEnabled());
const string fromhost = cmdObj.getStringField("fromhost");
if (!fromhost.empty()) {
- return adminPassthrough(txn, scopedToDb.db(), cmdObj, result);
+ return adminPassthrough(opCtx, scopedToDb.db(), cmdObj, result);
}
const auto fromDbElt = cmdObj["fromdb"];
@@ -649,7 +657,7 @@ public:
"invalid fromdb argument",
NamespaceString::validDBName(fromdb, NamespaceString::DollarInDbNameBehavior::Allow));
- auto scopedFromDb = uassertStatusOK(ScopedShardDatabase::getExisting(txn, fromdb));
+ auto scopedFromDb = uassertStatusOK(ScopedShardDatabase::getExisting(opCtx, fromdb));
uassert(ErrorCodes::IllegalOperation,
"Cannot copy from a sharded database",
!scopedFromDb.db()->isShardingEnabled());
@@ -662,12 +670,12 @@ public:
}
{
- const auto shard = uassertStatusOK(
- Grid::get(txn)->shardRegistry()->getShard(txn, scopedFromDb.db()->getPrimaryId()));
+ const auto shard = uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(
+ opCtx, scopedFromDb.db()->getPrimaryId()));
b.append("fromhost", shard->getConnString().toString());
}
- return adminPassthrough(txn, scopedToDb.db(), b.obj(), result);
+ return adminPassthrough(opCtx, scopedToDb.db(), b.obj(), result);
}
} clusterCopyDBCmd;
@@ -687,7 +695,7 @@ public:
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int,
@@ -695,17 +703,17 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
- auto conf = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, dbName));
+ auto conf = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName));
if (!conf->isSharded(nss.ns())) {
result.appendBool("sharded", false);
result.append("primary", conf->getPrimaryId().toString());
- return passthrough(txn, conf.get(), cmdObj, result);
+ return passthrough(opCtx, conf.get(), cmdObj, result);
}
result.appendBool("sharded", true);
- shared_ptr<ChunkManager> cm = conf->getChunkManager(txn, nss.ns());
+ shared_ptr<ChunkManager> cm = conf->getChunkManager(opCtx, nss.ns());
massert(12594, "how could chunk manager be null!", cm);
BSONObjBuilder shardStats;
@@ -720,7 +728,7 @@ public:
set<ShardId> shardIds;
cm->getAllShardIds(&shardIds);
for (const ShardId& shardId : shardIds) {
- const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ const auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
invariant(shardStatus.getStatus() == ErrorCodes::ShardNotFound);
continue;
@@ -865,7 +873,7 @@ public:
virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int,
@@ -874,12 +882,12 @@ public:
const string fullns = parseNs(dbName, cmdObj);
const string nsDBName = nsToDatabase(fullns);
- auto conf = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, nsDBName));
+ auto conf = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, nsDBName));
if (!conf->isSharded(fullns)) {
- return passthrough(txn, conf.get(), cmdObj, result);
+ return passthrough(opCtx, conf.get(), cmdObj, result);
}
- shared_ptr<ChunkManager> cm = conf->getChunkManager(txn, fullns);
+ shared_ptr<ChunkManager> cm = conf->getChunkManager(opCtx, fullns);
massert(13407, "how could chunk manager be null!", cm);
BSONObj min = cmdObj.getObjectField("min");
@@ -909,7 +917,7 @@ public:
cm->getShardIdsForRange(min, max, &shardIds);
for (const ShardId& shardId : shardIds) {
- const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ const auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
invariant(shardStatus.getStatus() == ErrorCodes::ShardNotFound);
continue;
@@ -990,7 +998,7 @@ public:
return nss.ns();
}
- Status explain(OperationContext* txn,
+ Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -1013,7 +1021,7 @@ public:
// Note that this implementation will not handle targeting retries and fails when the
// sharding metadata is too stale
- auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db());
+ auto status = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, nss.db());
if (!status.isOK()) {
return Status(status.getStatus().code(),
str::stream() << "Passthrough command failed: " << command.toString()
@@ -1033,7 +1041,7 @@ public:
}
const auto primaryShardStatus =
- Grid::get(txn)->shardRegistry()->getShard(txn, conf->getPrimaryId());
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, conf->getPrimaryId());
if (!primaryShardStatus.isOK()) {
return primaryShardStatus.getStatus();
}
@@ -1064,7 +1072,7 @@ public:
cmdResult.target = primaryShardStatus.getValue()->getConnString();
return ClusterExplain::buildExplainResult(
- txn, {cmdResult}, ClusterExplain::kSingleShard, timer.millis(), out);
+ opCtx, {cmdResult}, ClusterExplain::kSingleShard, timer.millis(), out);
}
} groupCmd;
@@ -1088,7 +1096,7 @@ public:
}
return Status::OK();
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
@@ -1099,7 +1107,8 @@ public:
errmsg = str::stream() << "doing a splitVector across dbs isn't supported via mongos";
return false;
}
- return NotAllowedOnShardedCollectionCmd::run(txn, dbName, cmdObj, options, errmsg, result);
+ return NotAllowedOnShardedCollectionCmd::run(
+ opCtx, dbName, cmdObj, options, errmsg, result);
}
virtual std::string parseNs(const string& dbname, const BSONObj& cmdObj) const {
return parseNsFullyQualified(dbname, cmdObj);
@@ -1127,7 +1136,7 @@ public:
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
@@ -1135,7 +1144,7 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
- auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, dbName);
+ auto status = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
if (!status.isOK()) {
return appendEmptyResultSet(result, status.getStatus(), nss.ns());
}
@@ -1143,7 +1152,7 @@ public:
shared_ptr<DBConfig> conf = status.getValue();
if (!conf->isSharded(nss.ns())) {
- if (passthrough(txn, conf.get(), cmdObj, options, result)) {
+ if (passthrough(opCtx, conf.get(), cmdObj, options, result)) {
return true;
}
@@ -1153,7 +1162,7 @@ public:
result.resetToEmpty();
auto parsedDistinct = ParsedDistinct::parse(
- txn, resolvedView.getNamespace(), cmdObj, ExtensionsCallbackNoop(), false);
+ opCtx, resolvedView.getNamespace(), cmdObj, ExtensionsCallbackNoop(), false);
if (!parsedDistinct.isOK()) {
return appendCommandStatus(result, parsedDistinct.getStatus());
}
@@ -1170,7 +1179,7 @@ public:
BSONObjBuilder aggResult;
Command::findCommand("aggregate")
- ->run(txn, dbName, aggCmd.getValue(), options, errmsg, aggResult);
+ ->run(opCtx, dbName, aggCmd.getValue(), options, errmsg, aggResult);
ViewResponseFormatter formatter(aggResult.obj());
auto formatStatus = formatter.appendAsDistinctResponse(&result);
@@ -1183,7 +1192,7 @@ public:
return false;
}
- shared_ptr<ChunkManager> cm = conf->getChunkManager(txn, nss.ns());
+ shared_ptr<ChunkManager> cm = conf->getChunkManager(opCtx, nss.ns());
massert(10420, "how could chunk manager be null!", cm);
BSONObj query = getQuery(cmdObj);
@@ -1195,7 +1204,7 @@ public:
// Construct collator for deduping.
std::unique_ptr<CollatorInterface> collator;
if (!queryCollation.getValue().isEmpty()) {
- auto statusWithCollator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(queryCollation.getValue());
if (!statusWithCollator.isOK()) {
return appendEmptyResultSet(result, statusWithCollator.getStatus(), nss.ns());
@@ -1204,7 +1213,7 @@ public:
}
set<ShardId> shardIds;
- cm->getShardIdsForQuery(txn, query, queryCollation.getValue(), &shardIds);
+ cm->getShardIdsForQuery(opCtx, query, queryCollation.getValue(), &shardIds);
BSONObjComparator bsonCmp(BSONObj(),
BSONObjComparator::FieldNamesMode::kConsider,
@@ -1213,7 +1222,7 @@ public:
BSONObjSet all = bsonCmp.makeBSONObjSet();
for (const ShardId& shardId : shardIds) {
- const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ const auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
invariant(shardStatus.getStatus() == ErrorCodes::ShardNotFound);
continue;
@@ -1248,7 +1257,7 @@ public:
return true;
}
- Status explain(OperationContext* txn,
+ Status explain(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
ExplainCommon::Verbosity verbosity,
@@ -1287,7 +1296,7 @@ public:
Timer timer;
vector<Strategy::CommandResult> shardResults;
- Strategy::commandOp(txn,
+ Strategy::commandOp(opCtx,
dbname,
explainCmdBob.obj(),
options,
@@ -1302,7 +1311,7 @@ public:
ResolvedView::isResolvedViewErrorResponse(shardResults[0].result)) {
auto resolvedView = ResolvedView::fromBSON(shardResults[0].result);
auto parsedDistinct = ParsedDistinct::parse(
- txn, resolvedView.getNamespace(), cmdObj, ExtensionsCallbackNoop(), true);
+ opCtx, resolvedView.getNamespace(), cmdObj, ExtensionsCallbackNoop(), true);
if (!parsedDistinct.isOK()) {
return parsedDistinct.getStatus();
}
@@ -1319,7 +1328,7 @@ public:
std::string errMsg;
if (Command::findCommand("aggregate")
- ->run(txn, dbname, aggCmd.getValue(), 0, errMsg, *out)) {
+ ->run(opCtx, dbname, aggCmd.getValue(), 0, errMsg, *out)) {
return Status::OK();
}
@@ -1329,7 +1338,7 @@ public:
const char* mongosStageName = ClusterExplain::getStageNameForReadOp(shardResults, cmdObj);
return ClusterExplain::buildExplainResult(
- txn, shardResults, mongosStageName, millisElapsed, out);
+ opCtx, shardResults, mongosStageName, millisElapsed, out);
}
} disinctCmd;
@@ -1364,7 +1373,7 @@ public:
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int,
@@ -1372,12 +1381,12 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNs(dbName, cmdObj));
- auto conf = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, dbName));
+ auto conf = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName));
if (!conf->isSharded(nss.ns())) {
- return passthrough(txn, conf.get(), cmdObj, result);
+ return passthrough(opCtx, conf.get(), cmdObj, result);
}
- shared_ptr<ChunkManager> cm = conf->getChunkManager(txn, nss.ns());
+ shared_ptr<ChunkManager> cm = conf->getChunkManager(opCtx, nss.ns());
massert(13091, "how could chunk manager be null!", cm);
if (SimpleBSONObjComparator::kInstance.evaluate(cm->getShardKeyPattern().toBSON() ==
BSON("files_id" << 1))) {
@@ -1385,7 +1394,7 @@ public:
vector<Strategy::CommandResult> results;
Strategy::commandOp(
- txn, dbName, cmdObj, 0, nss.ns(), finder, CollationSpec::kSimpleSpec, &results);
+ opCtx, dbName, cmdObj, 0, nss.ns(), finder, CollationSpec::kSimpleSpec, &results);
verify(results.size() == 1); // querying on shard key so should only talk to one shard
BSONObj res = results.begin()->result;
@@ -1418,7 +1427,7 @@ public:
vector<Strategy::CommandResult> results;
try {
- Strategy::commandOp(txn,
+ Strategy::commandOp(opCtx,
dbName,
shardCmd,
0,
@@ -1505,7 +1514,7 @@ public:
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
@@ -1513,12 +1522,12 @@ public:
BSONObjBuilder& result) {
const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj));
- auto conf = uassertStatusOK(Grid::get(txn)->catalogCache()->getDatabase(txn, dbName));
+ auto conf = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName));
if (!conf->isSharded(nss.ns())) {
- return passthrough(txn, conf.get(), cmdObj, options, result);
+ return passthrough(opCtx, conf.get(), cmdObj, options, result);
}
- shared_ptr<ChunkManager> cm = conf->getChunkManager(txn, nss.ns());
+ shared_ptr<ChunkManager> cm = conf->getChunkManager(opCtx, nss.ns());
massert(13500, "how could chunk manager be null!", cm);
BSONObj query = getQuery(cmdObj);
@@ -1527,7 +1536,7 @@ public:
return appendEmptyResultSet(result, collation.getStatus(), nss.ns());
}
set<ShardId> shardIds;
- cm->getShardIdsForQuery(txn, query, collation.getValue(), &shardIds);
+ cm->getShardIdsForQuery(opCtx, query, collation.getValue(), &shardIds);
// We support both "num" and "limit" options to control limit
int limit = 100;
@@ -1538,7 +1547,7 @@ public:
list<shared_ptr<Future::CommandResult>> futures;
BSONArrayBuilder shardArray;
for (const ShardId& shardId : shardIds) {
- const auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ const auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
invariant(shardStatus.getStatus() == ErrorCodes::ShardNotFound);
continue;
@@ -1559,7 +1568,7 @@ public:
i != futures.end();
i++) {
shared_ptr<Future::CommandResult> res = *i;
- if (!res->join(txn)) {
+ if (!res->join(opCtx)) {
errmsg = res->result()["errmsg"].String();
if (res->result().hasField("code")) {
result.append(res->result()["code"]);
@@ -1634,7 +1643,7 @@ public:
virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
return false;
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int,
@@ -1657,7 +1666,7 @@ public:
virtual bool supportsWriteConcern(const BSONObj& cmd) const override {
return false;
}
- virtual bool run(OperationContext* txn,
+ virtual bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int,
@@ -1669,13 +1678,13 @@ public:
// $eval isn't allowed to access sharded collections, but we leave it to the
// shard to detect that.
- auto status = Grid::get(txn)->catalogCache()->getDatabase(txn, dbName);
+ auto status = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
if (!status.isOK()) {
return appendCommandStatus(result, status.getStatus());
}
shared_ptr<DBConfig> conf = status.getValue();
- return passthrough(txn, conf.get(), cmdObj, result);
+ return passthrough(opCtx, conf.get(), cmdObj, result);
}
} evalCmd;
@@ -1706,7 +1715,7 @@ public:
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
@@ -1714,12 +1723,12 @@ public:
BSONObjBuilder& result) final {
auto nss = NamespaceString::makeListCollectionsNSS(dbName);
- auto conf = Grid::get(txn)->catalogCache()->getDatabase(txn, dbName);
+ auto conf = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
if (!conf.isOK()) {
return appendEmptyResultSet(result, conf.getStatus(), dbName + ".$cmd.listCollections");
}
- return cursorCommandPassthrough(txn, conf.getValue(), cmdObj, nss, options, &result);
+ return cursorCommandPassthrough(opCtx, conf.getValue(), cmdObj, nss, options, &result);
}
} cmdListCollections;
@@ -1752,13 +1761,13 @@ public:
return false;
}
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const string& dbName,
BSONObj& cmdObj,
int options,
string& errmsg,
BSONObjBuilder& result) final {
- auto conf = Grid::get(txn)->catalogCache()->getDatabase(txn, dbName);
+ auto conf = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName);
if (!conf.isOK()) {
return appendCommandStatus(result, conf.getStatus());
}
@@ -1768,7 +1777,8 @@ public:
NamespaceString::makeListIndexesNSS(targetNss.db(), targetNss.coll());
dassert(targetNss == commandNss.getTargetNSForListIndexes());
- return cursorCommandPassthrough(txn, conf.getValue(), cmdObj, commandNss, options, &result);
+ return cursorCommandPassthrough(
+ opCtx, conf.getValue(), cmdObj, commandNss, options, &result);
}
} cmdListIndexes;
diff --git a/src/mongo/s/commands/run_on_all_shards_cmd.cpp b/src/mongo/s/commands/run_on_all_shards_cmd.cpp
index 9b0b26cf14b..b534bf0628a 100644
--- a/src/mongo/s/commands/run_on_all_shards_cmd.cpp
+++ b/src/mongo/s/commands/run_on_all_shards_cmd.cpp
@@ -64,14 +64,14 @@ BSONObj RunOnAllShardsCommand::specialErrorHandler(const std::string& server,
return originalResult;
}
-void RunOnAllShardsCommand::getShardIds(OperationContext* txn,
+void RunOnAllShardsCommand::getShardIds(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
std::vector<ShardId>& shardIds) {
grid.shardRegistry()->getAllShardIds(&shardIds);
}
-bool RunOnAllShardsCommand::run(OperationContext* txn,
+bool RunOnAllShardsCommand::run(OperationContext* opCtx,
const std::string& dbName,
BSONObj& cmdObj,
int options,
@@ -80,15 +80,15 @@ bool RunOnAllShardsCommand::run(OperationContext* txn,
LOG(1) << "RunOnAllShardsCommand db: " << dbName << " cmd:" << redact(cmdObj);
if (_implicitCreateDb) {
- uassertStatusOK(ScopedShardDatabase::getOrCreate(txn, dbName));
+ uassertStatusOK(ScopedShardDatabase::getOrCreate(opCtx, dbName));
}
std::vector<ShardId> shardIds;
- getShardIds(txn, dbName, cmdObj, shardIds);
+ getShardIds(opCtx, dbName, cmdObj, shardIds);
std::list<std::shared_ptr<Future::CommandResult>> futures;
for (const ShardId& shardId : shardIds) {
- const auto shardStatus = grid.shardRegistry()->getShard(txn, shardId);
+ const auto shardStatus = grid.shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
continue;
}
@@ -120,7 +120,7 @@ bool RunOnAllShardsCommand::run(OperationContext* txn,
++futuresit, ++shardIdsIt) {
std::shared_ptr<Future::CommandResult> res = *futuresit;
- if (res->join(txn)) {
+ if (res->join(opCtx)) {
// success :)
BSONObj result = res->result();
results.emplace_back(shardIdsIt->toString(), result);
diff --git a/src/mongo/s/commands/run_on_all_shards_cmd.h b/src/mongo/s/commands/run_on_all_shards_cmd.h
index f0c983b8578..a01c133b987 100644
--- a/src/mongo/s/commands/run_on_all_shards_cmd.h
+++ b/src/mongo/s/commands/run_on_all_shards_cmd.h
@@ -78,12 +78,12 @@ public:
const BSONObj& originalResult) const;
// The default implementation uses all shards.
- virtual void getShardIds(OperationContext* txn,
+ virtual void getShardIds(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
std::vector<ShardId>& shardIds);
- bool run(OperationContext* txn,
+ bool run(OperationContext* opCtx,
const std::string& db,
BSONObj& cmdObj,
int options,
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index 148dcd66e05..a209b0a8f9b 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -82,7 +82,7 @@ using std::stringstream;
namespace {
-void runAgainstRegistered(OperationContext* txn,
+void runAgainstRegistered(OperationContext* opCtx,
const char* ns,
BSONObj& jsobj,
BSONObjBuilder& anObjBuilder,
@@ -104,14 +104,14 @@ void runAgainstRegistered(OperationContext* txn,
return;
}
- execCommandClient(txn, c, queryOptions, ns, jsobj, anObjBuilder);
+ execCommandClient(opCtx, c, queryOptions, ns, jsobj, anObjBuilder);
}
/**
* Called into by the web server. For now we just translate the parameters to their old style
* equivalents.
*/
-void execCommandHandler(OperationContext* txn,
+void execCommandHandler(OperationContext* opCtx,
Command* command,
const rpc::RequestInterface& request,
rpc::ReplyBuilderInterface* replyBuilder) {
@@ -124,7 +124,7 @@ void execCommandHandler(OperationContext* txn,
std::string db = request.getDatabase().rawData();
BSONObjBuilder result;
- execCommandClient(txn, command, queryFlags, request.getDatabase().rawData(), cmdObj, result);
+ execCommandClient(opCtx, command, queryFlags, request.getDatabase().rawData(), cmdObj, result);
replyBuilder->setCommandReply(result.done()).setMetadata(rpc::makeEmptyMetadata());
}
@@ -134,7 +134,7 @@ MONGO_INITIALIZER(InitializeCommandExecCommandHandler)(InitializerContext* const
return Status::OK();
}
-void registerErrorImpl(OperationContext* txn, const DBException& exception) {}
+void registerErrorImpl(OperationContext* opCtx, const DBException& exception) {}
MONGO_INITIALIZER(InitializeRegisterErrorHandler)(InitializerContext* const) {
Command::registerRegisterError(registerErrorImpl);
@@ -143,12 +143,12 @@ MONGO_INITIALIZER(InitializeRegisterErrorHandler)(InitializerContext* const) {
} // namespace
-void Strategy::queryOp(OperationContext* txn, const NamespaceString& nss, DbMessage* dbm) {
+void Strategy::queryOp(OperationContext* opCtx, const NamespaceString& nss, DbMessage* dbm) {
globalOpCounters.gotQuery();
const QueryMessage q(*dbm);
- Client* const client = txn->getClient();
+ Client* const client = opCtx->getClient();
AuthorizationSession* const authSession = AuthorizationSession::get(client);
Status status = authSession->checkAuthForFind(nss, false);
@@ -183,7 +183,7 @@ void Strategy::queryOp(OperationContext* txn, const NamespaceString& nss, DbMess
}();
auto canonicalQuery =
- uassertStatusOK(CanonicalQuery::canonicalize(txn, q, ExtensionsCallbackNoop()));
+ uassertStatusOK(CanonicalQuery::canonicalize(opCtx, q, ExtensionsCallbackNoop()));
// If the $explain flag was set, we must run the operation on the shards as an explain command
// rather than a find command.
@@ -199,7 +199,7 @@ void Strategy::queryOp(OperationContext* txn, const NamespaceString& nss, DbMess
BSONObjBuilder explainBuilder;
uassertStatusOK(Strategy::explainFind(
- txn, findCommand, queryRequest, verbosity, metadata, &explainBuilder));
+ opCtx, findCommand, queryRequest, verbosity, metadata, &explainBuilder));
BSONObj explainObj = explainBuilder.done();
replyToQuery(0, // query result flags
@@ -220,7 +220,7 @@ void Strategy::queryOp(OperationContext* txn, const NamespaceString& nss, DbMess
// 0 means the cursor is exhausted. Otherwise we assume that a cursor with the returned id can
// be retrieved via the ClusterCursorManager.
auto cursorId =
- ClusterFind::runQuery(txn,
+ ClusterFind::runQuery(opCtx,
*canonicalQuery,
readPreference,
&batch,
@@ -249,10 +249,12 @@ void Strategy::queryOp(OperationContext* txn, const NamespaceString& nss, DbMess
cursorId.getValue());
}
-void Strategy::clientCommandOp(OperationContext* txn, const NamespaceString& nss, DbMessage* dbm) {
+void Strategy::clientCommandOp(OperationContext* opCtx,
+ const NamespaceString& nss,
+ DbMessage* dbm) {
const QueryMessage q(*dbm);
- Client* const client = txn->getClient();
+ Client* const client = opCtx->getClient();
LOG(3) << "command: " << q.ns << " " << redact(q.query) << " ntoreturn: " << q.ntoreturn
<< " options: " << q.queryOptions;
@@ -282,7 +284,7 @@ void Strategy::clientCommandOp(OperationContext* txn, const NamespaceString& nss
const NamespaceString interposedNss("admin", "$cmd");
BSONObjBuilder reply;
runAgainstRegistered(
- txn, interposedNss.ns().c_str(), interposedCmd, reply, q.queryOptions);
+ opCtx, interposedNss.ns().c_str(), interposedCmd, reply, q.queryOptions);
replyToQuery(0, client->session(), dbm->msg(), reply.done());
};
@@ -336,7 +338,7 @@ void Strategy::clientCommandOp(OperationContext* txn, const NamespaceString& nss
const int maxTimeMS =
uassertStatusOK(QueryRequest::parseMaxTimeMS(cmdObj[QueryRequest::cmdOptionMaxTimeMS]));
if (maxTimeMS > 0) {
- txn->setDeadlineAfterNowBy(Milliseconds{maxTimeMS});
+ opCtx->setDeadlineAfterNowBy(Milliseconds{maxTimeMS});
}
int loops = 5;
@@ -346,7 +348,7 @@ void Strategy::clientCommandOp(OperationContext* txn, const NamespaceString& nss
OpQueryReplyBuilder reply;
{
BSONObjBuilder builder(reply.bufBuilderForResults());
- runAgainstRegistered(txn, q.ns, cmdObj, builder, q.queryOptions);
+ runAgainstRegistered(opCtx, q.ns, cmdObj, builder, q.queryOptions);
}
reply.sendCommandReply(client->session(), dbm->msg());
return;
@@ -361,13 +363,13 @@ void Strategy::clientCommandOp(OperationContext* txn, const NamespaceString& nss
// For legacy reasons, ns may not actually be set in the exception
const std::string staleNS(e.getns().empty() ? std::string(q.ns) : e.getns());
- ShardConnection::checkMyConnectionVersions(txn, staleNS);
+ ShardConnection::checkMyConnectionVersions(opCtx, staleNS);
if (loops < 4) {
// This throws out the entire database cache entry in response to
// StaleConfigException instead of just the collection which encountered it. There
// is no good reason for it other than the lack of lower-granularity cache
// invalidation.
- Grid::get(txn)->catalogCache()->invalidate(NamespaceString(staleNS).db());
+ Grid::get(opCtx)->catalogCache()->invalidate(NamespaceString(staleNS).db());
}
} catch (const DBException& e) {
OpQueryReplyBuilder reply;
@@ -381,7 +383,7 @@ void Strategy::clientCommandOp(OperationContext* txn, const NamespaceString& nss
}
}
-void Strategy::commandOp(OperationContext* txn,
+void Strategy::commandOp(OperationContext* opCtx,
const string& db,
const BSONObj& command,
int options,
@@ -395,7 +397,7 @@ void Strategy::commandOp(OperationContext* txn,
qSpec, CommandInfo(versionedNS, targetingQuery, targetingCollation));
// Initialize the cursor
- cursor.init(txn);
+ cursor.init(opCtx);
set<ShardId> shardIds;
cursor.getQueryShardIds(shardIds);
@@ -411,7 +413,7 @@ void Strategy::commandOp(OperationContext* txn,
}
}
-void Strategy::getMore(OperationContext* txn, const NamespaceString& nss, DbMessage* dbm) {
+void Strategy::getMore(OperationContext* opCtx, const NamespaceString& nss, DbMessage* dbm) {
const int ntoreturn = dbm->pullInt();
uassert(
34424, str::stream() << "Invalid ntoreturn for OP_GET_MORE: " << ntoreturn, ntoreturn >= 0);
@@ -419,12 +421,12 @@ void Strategy::getMore(OperationContext* txn, const NamespaceString& nss, DbMess
globalOpCounters.gotGetMore();
- Client* const client = txn->getClient();
+ Client* const client = opCtx->getClient();
// TODO: Handle stale config exceptions here from coll being dropped or sharded during op for
// now has same semantics as legacy request.
- auto statusGetDb = Grid::get(txn)->catalogCache()->getDatabase(txn, nss.db());
+ auto statusGetDb = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, nss.db());
if (statusGetDb == ErrorCodes::NamespaceNotFound) {
replyToQuery(ResultFlag_CursorNotFound, client->session(), dbm->msg(), 0, 0, 0);
return;
@@ -438,7 +440,7 @@ void Strategy::getMore(OperationContext* txn, const NamespaceString& nss, DbMess
GetMoreRequest getMoreRequest(nss, cursorId, batchSize, boost::none, boost::none, boost::none);
- auto cursorResponse = ClusterFind::runGetMore(txn, getMoreRequest);
+ auto cursorResponse = ClusterFind::runGetMore(opCtx, getMoreRequest);
if (cursorResponse == ErrorCodes::CursorNotFound) {
replyToQuery(ResultFlag_CursorNotFound, client->session(), dbm->msg(), 0, 0, 0);
return;
@@ -464,7 +466,7 @@ void Strategy::getMore(OperationContext* txn, const NamespaceString& nss, DbMess
cursorResponse.getValue().getCursorId());
}
-void Strategy::killCursors(OperationContext* txn, DbMessage* dbm) {
+void Strategy::killCursors(OperationContext* opCtx, DbMessage* dbm) {
const int numCursors = dbm->pullInt();
massert(34425,
str::stream() << "Invalid killCursors message. numCursors: " << numCursors
@@ -481,9 +483,9 @@ void Strategy::killCursors(OperationContext* txn, DbMessage* dbm) {
ConstDataCursor cursors(dbm->getArray(numCursors));
- Client* const client = txn->getClient();
+ Client* const client = opCtx->getClient();
AuthorizationSession* const authSession = AuthorizationSession::get(client);
- ClusterCursorManager* const manager = Grid::get(txn)->getCursorManager();
+ ClusterCursorManager* const manager = Grid::get(opCtx)->getCursorManager();
for (int i = 0; i < numCursors; ++i) {
const CursorId cursorId = cursors.readAndAdvance<LittleEndian<int64_t>>();
@@ -517,13 +519,13 @@ void Strategy::killCursors(OperationContext* txn, DbMessage* dbm) {
}
}
-void Strategy::writeOp(OperationContext* txn, DbMessage* dbm) {
+void Strategy::writeOp(OperationContext* opCtx, DbMessage* dbm) {
OwnedPointerVector<BatchedCommandRequest> commandRequestsOwned;
std::vector<BatchedCommandRequest*>& commandRequests = commandRequestsOwned.mutableVector();
msgToBatchRequests(dbm->msg(), &commandRequests);
- auto& clientLastError = LastError::get(txn->getClient());
+ auto& clientLastError = LastError::get(opCtx->getClient());
for (auto it = commandRequests.begin(); it != commandRequests.end(); ++it) {
// Multiple commands registered to last error as multiple requests
@@ -546,7 +548,7 @@ void Strategy::writeOp(OperationContext* txn, DbMessage* dbm) {
BSONObj commandBSON = commandRequest->toBSON();
BSONObjBuilder builder;
- runAgainstRegistered(txn, cmdNS.c_str(), commandBSON, builder, 0);
+ runAgainstRegistered(opCtx, cmdNS.c_str(), commandBSON, builder, 0);
bool parsed = commandResponse.parseBSON(builder.done(), nullptr);
(void)parsed; // for compile
@@ -566,7 +568,7 @@ void Strategy::writeOp(OperationContext* txn, DbMessage* dbm) {
}
}
-Status Strategy::explainFind(OperationContext* txn,
+Status Strategy::explainFind(OperationContext* opCtx,
const BSONObj& findCommand,
const QueryRequest& qr,
ExplainCommon::Verbosity verbosity,
@@ -581,7 +583,7 @@ Status Strategy::explainFind(OperationContext* txn,
Timer timer;
std::vector<Strategy::CommandResult> shardResults;
- Strategy::commandOp(txn,
+ Strategy::commandOp(opCtx,
qr.nss().db().toString(),
explainCmdBob.obj(),
options,
@@ -601,13 +603,13 @@ Status Strategy::explainFind(OperationContext* txn,
const char* mongosStageName = ClusterExplain::getStageNameForReadOp(shardResults, findCommand);
return ClusterExplain::buildExplainResult(
- txn, shardResults, mongosStageName, millisElapsed, out);
+ opCtx, shardResults, mongosStageName, millisElapsed, out);
}
/**
* Called into by the commands infrastructure.
*/
-void execCommandClient(OperationContext* txn,
+void execCommandClient(OperationContext* opCtx,
Command* c,
int queryOptions,
const char* ns,
@@ -624,7 +626,7 @@ void execCommandClient(OperationContext* txn,
return;
}
- Status status = Command::checkAuthorization(c, txn, dbname, cmdObj);
+ Status status = Command::checkAuthorization(c, opCtx, dbname, cmdObj);
if (!status.isOK()) {
Command::appendCommandStatus(result, status);
return;
@@ -657,20 +659,20 @@ void execCommandClient(OperationContext* txn,
// attach tracking
rpc::TrackingMetadata trackingMetadata;
trackingMetadata.initWithOperName(c->getName());
- rpc::TrackingMetadata::get(txn) = trackingMetadata;
+ rpc::TrackingMetadata::get(opCtx) = trackingMetadata;
std::string errmsg;
bool ok = false;
try {
if (!supportsWriteConcern) {
- ok = c->run(txn, dbname, cmdObj, queryOptions, errmsg, result);
+ ok = c->run(opCtx, dbname, cmdObj, queryOptions, errmsg, result);
} else {
// Change the write concern while running the command.
- const auto oldWC = txn->getWriteConcern();
- ON_BLOCK_EXIT([&] { txn->setWriteConcern(oldWC); });
- txn->setWriteConcern(wcResult.getValue());
+ const auto oldWC = opCtx->getWriteConcern();
+ ON_BLOCK_EXIT([&] { opCtx->setWriteConcern(oldWC); });
+ opCtx->setWriteConcern(wcResult.getValue());
- ok = c->run(txn, dbname, cmdObj, queryOptions, errmsg, result);
+ ok = c->run(opCtx, dbname, cmdObj, queryOptions, errmsg, result);
}
} catch (const DBException& e) {
result.resetToEmpty();
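
Editorial note (not part of the patch): the execCommandClient hunk above temporarily swaps the write concern on the operation context and relies on ON_BLOCK_EXIT to put the old value back even if the command throws. A minimal sketch of that save/restore idiom in plain C++ follows; ScopeGuard, Settings and runWithTemporarySetting are hypothetical stand-ins for illustration only, not MongoDB APIs.

    #include <functional>
    #include <utility>

    // Tiny stand-in for MongoDB's ON_BLOCK_EXIT: runs a callable on scope exit.
    class ScopeGuard {
    public:
        explicit ScopeGuard(std::function<void()> f) : _f(std::move(f)) {}
        ~ScopeGuard() { _f(); }  // fires on normal return and on exception
    private:
        std::function<void()> _f;
    };

    struct Settings {
        int writeConcernW = 1;  // hypothetical per-operation setting
    };

    void runWithTemporarySetting(Settings& s, int temporaryW) {
        const int oldW = s.writeConcernW;                     // remember the current value
        ScopeGuard restore([&] { s.writeConcernW = oldW; });  // restore it on scope exit
        s.writeConcernW = temporaryW;                         // override for this call only
        // ... run the command here; the guard restores the old value afterwards
    }

The point of the guard is that the restore happens on every exit path, which is why the real code can catch DBException after c->run() without leaking the overridden write concern.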
diff --git a/src/mongo/s/commands/strategy.h b/src/mongo/s/commands/strategy.h
index 2b55c80621f..51a0d1673cc 100644
--- a/src/mongo/s/commands/strategy.h
+++ b/src/mongo/s/commands/strategy.h
@@ -56,26 +56,26 @@ public:
*
* Must not be called with legacy '.$cmd' commands.
*/
- static void queryOp(OperationContext* txn, const NamespaceString& nss, DbMessage* dbm);
+ static void queryOp(OperationContext* opCtx, const NamespaceString& nss, DbMessage* dbm);
/**
* Handles a legacy-style getMore request and sends the response back on success (or cursor not
* found) or throws on error.
*/
- static void getMore(OperationContext* txn, const NamespaceString& nss, DbMessage* dbm);
+ static void getMore(OperationContext* opCtx, const NamespaceString& nss, DbMessage* dbm);
/**
* Handles a legacy-style killCursors request. Doesn't send any response on success or throws on
* error.
*/
- static void killCursors(OperationContext* txn, DbMessage* dbm);
+ static void killCursors(OperationContext* opCtx, DbMessage* dbm);
/**
* Handles a legacy-style write operation request and updates the last error state on the client
* with the result from the operation. Doesn't send any response back and does not throw on
* errors.
*/
- static void writeOp(OperationContext* txn, DbMessage* dbm);
+ static void writeOp(OperationContext* opCtx, DbMessage* dbm);
/**
* Executes a legacy-style ($cmd namespace) command. Does not throw and returns the response
@@ -84,7 +84,9 @@ public:
* Catches StaleConfigException errors and retries the command automatically after refreshing
* the metadata for the failing namespace.
*/
- static void clientCommandOp(OperationContext* txn, const NamespaceString& nss, DbMessage* dbm);
+ static void clientCommandOp(OperationContext* opCtx,
+ const NamespaceString& nss,
+ DbMessage* dbm);
/**
* Helper to run an explain of a find operation on the shards. Fills 'out' with the result of
@@ -94,7 +96,7 @@ public:
* Used both if mongos receives an explain command and if it receives an OP_QUERY find with the
* $explain modifier.
*/
- static Status explainFind(OperationContext* txn,
+ static Status explainFind(OperationContext* opCtx,
const BSONObj& findCommand,
const QueryRequest& qr,
ExplainCommon::Verbosity verbosity,
@@ -117,7 +119,7 @@ public:
* TODO: Replace these methods and all other methods of command dispatch with a more general
* command op framework.
*/
- static void commandOp(OperationContext* txn,
+ static void commandOp(OperationContext* opCtx,
const std::string& db,
const BSONObj& command,
int options,
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
index c16f671ba78..f5aec193923 100644
--- a/src/mongo/s/config.cpp
+++ b/src/mongo/s/config.cpp
@@ -81,7 +81,7 @@ void DBConfig::markNSNotSharded(const std::string& ns) {
}
}
-std::shared_ptr<ChunkManager> DBConfig::getChunkManagerIfExists(OperationContext* txn,
+std::shared_ptr<ChunkManager> DBConfig::getChunkManagerIfExists(OperationContext* opCtx,
const std::string& ns,
bool shouldReload,
bool forceReload) {
@@ -89,13 +89,13 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManagerIfExists(OperationContext
LastError::Disabled ignoreForGLE(&LastError::get(cc()));
try {
- return getChunkManager(txn, ns, shouldReload, forceReload);
+ return getChunkManager(opCtx, ns, shouldReload, forceReload);
} catch (const DBException&) {
return nullptr;
}
}
-std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
+std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* opCtx,
const std::string& ns,
bool shouldReload,
bool forceReload) {
@@ -113,7 +113,7 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
// Note: read the _reloadCount inside the _lock mutex, so _loadIfNeeded will always
// be forced to perform a reload.
const auto currentReloadIteration = _reloadCount.load();
- _loadIfNeeded(txn, currentReloadIteration);
+ _loadIfNeeded(opCtx, currentReloadIteration);
it = _collections.find(ns);
}
@@ -139,8 +139,8 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
// currently
std::vector<ChunkType> newestChunk;
if (oldVersion.isSet() && !forceReload) {
- uassertStatusOK(Grid::get(txn)->catalogClient(txn)->getChunks(
- txn,
+ uassertStatusOK(Grid::get(opCtx)->catalogClient(opCtx)->getChunks(
+ opCtx,
BSON(ChunkType::ns(ns)),
BSON(ChunkType::DEPRECATED_lastmod() << -1),
1,
@@ -200,7 +200,7 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
oldManager->getShardKeyPattern(),
oldManager->getDefaultCollator() ? oldManager->getDefaultCollator()->clone() : nullptr,
oldManager->isUnique()));
- tempChunkManager->loadExistingRanges(txn, oldManager.get());
+ tempChunkManager->loadExistingRanges(opCtx, oldManager.get());
if (!tempChunkManager->numChunks()) {
// Maybe we're not sharded any more, so do a full reload
@@ -208,16 +208,16 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
const bool successful = [&]() {
stdx::lock_guard<stdx::mutex> lk(_lock);
- return _loadIfNeeded(txn, currentReloadIteration);
+ return _loadIfNeeded(opCtx, currentReloadIteration);
}();
// If we aren't successful loading the database entry, we don't want to keep the stale
// object around which has invalid data.
if (!successful) {
- Grid::get(txn)->catalogCache()->invalidate(_name);
+ Grid::get(opCtx)->catalogCache()->invalidate(_name);
}
- return getChunkManager(txn, ns);
+ return getChunkManager(opCtx, ns);
}
}
@@ -277,20 +277,20 @@ std::shared_ptr<ChunkManager> DBConfig::getChunkManager(OperationContext* txn,
return ci.cm;
}
-bool DBConfig::load(OperationContext* txn) {
+bool DBConfig::load(OperationContext* opCtx) {
const auto currentReloadIteration = _reloadCount.load();
stdx::lock_guard<stdx::mutex> lk(_lock);
- return _loadIfNeeded(txn, currentReloadIteration);
+ return _loadIfNeeded(opCtx, currentReloadIteration);
}
-bool DBConfig::_loadIfNeeded(OperationContext* txn, Counter reloadIteration) {
+bool DBConfig::_loadIfNeeded(OperationContext* opCtx, Counter reloadIteration) {
if (reloadIteration != _reloadCount.load()) {
return true;
}
- const auto catalogClient = Grid::get(txn)->catalogClient(txn);
+ const auto catalogClient = Grid::get(opCtx)->catalogClient(opCtx);
- auto status = catalogClient->getDatabase(txn, _name);
+ auto status = catalogClient->getDatabase(opCtx, _name);
if (status == ErrorCodes::NamespaceNotFound) {
return false;
}
@@ -310,7 +310,7 @@ bool DBConfig::_loadIfNeeded(OperationContext* txn, Counter reloadIteration) {
std::vector<CollectionType> collections;
repl::OpTime configOpTimeWhenLoadingColl;
uassertStatusOK(
- catalogClient->getCollections(txn, &_name, &collections, &configOpTimeWhenLoadingColl));
+ catalogClient->getCollections(opCtx, &_name, &collections, &configOpTimeWhenLoadingColl));
invariant(configOpTimeWhenLoadingColl >= _configOpTime);
@@ -325,7 +325,7 @@ bool DBConfig::_loadIfNeeded(OperationContext* txn, Counter reloadIteration) {
if (!coll.getDropped()) {
std::unique_ptr<CollatorInterface> defaultCollator;
if (!coll.getDefaultCollation().isEmpty()) {
- auto statusWithCollator = CollatorFactoryInterface::get(txn->getServiceContext())
+ auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
->makeFromBSON(coll.getDefaultCollation());
// The collation was validated upon collection creation.
@@ -342,7 +342,7 @@ bool DBConfig::_loadIfNeeded(OperationContext* txn, Counter reloadIteration) {
coll.getUnique()));
// Do the blocking collection load
- manager->loadExistingRanges(txn, nullptr);
+ manager->loadExistingRanges(opCtx, nullptr);
// Collections with no chunks are unsharded, no matter what the collections entry says
if (manager->numChunks()) {
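
Editorial note (not part of the patch): DBConfig::load and _loadIfNeeded above read a reload counter outside the mutex and re-check it inside, so that concurrent callers do not repeat an expensive reload another thread has just finished. A compact sketch of that deduplication pattern, under the assumption of hypothetical names (CacheEntry, reloadIfStale):

    #include <atomic>
    #include <cstdint>
    #include <mutex>

    class CacheEntry {
    public:
        bool reloadIfStale() {
            const std::uint64_t observed = _reloadCount.load();  // snapshot outside the lock
            std::lock_guard<std::mutex> lk(_mutex);
            return _reloadIfNeeded(observed);
        }

    private:
        bool _reloadIfNeeded(std::uint64_t observedCount) {
            if (observedCount != _reloadCount.load()) {
                // Another thread completed a reload after our snapshot was taken;
                // treat the entry as fresh instead of loading it again.
                return true;
            }
            // ... perform the actual (expensive) load here ...
            _reloadCount.fetch_add(1);  // publish that a reload has happened
            return true;
        }

        std::mutex _mutex;
        std::atomic<std::uint64_t> _reloadCount{0};
    };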
diff --git a/src/mongo/s/config.h b/src/mongo/s/config.h
index 2b0ecbb7d07..bbd63cf3b3b 100644
--- a/src/mongo/s/config.h
+++ b/src/mongo/s/config.h
@@ -80,11 +80,11 @@ public:
*/
bool isSharded(const std::string& ns);
- std::shared_ptr<ChunkManager> getChunkManager(OperationContext* txn,
+ std::shared_ptr<ChunkManager> getChunkManager(OperationContext* opCtx,
const std::string& ns,
bool reload = false,
bool forceReload = false);
- std::shared_ptr<ChunkManager> getChunkManagerIfExists(OperationContext* txn,
+ std::shared_ptr<ChunkManager> getChunkManagerIfExists(OperationContext* opCtx,
const std::string& ns,
bool reload = false,
bool forceReload = false);
@@ -93,7 +93,7 @@ public:
* Returns true if it is successful at loading the DBConfig, false if the database is not found,
* and throws on all other errors.
*/
- bool load(OperationContext* txn);
+ bool load(OperationContext* opCtx);
protected:
typedef std::map<std::string, CollectionInfo> CollectionInfoMap;
@@ -105,7 +105,7 @@ protected:
* Also returns true without reloading if reloadIteration is not equal to the _reloadCount.
* This is to avoid multiple threads attempting to reload do duplicate work.
*/
- bool _loadIfNeeded(OperationContext* txn, Counter reloadIteration);
+ bool _loadIfNeeded(OperationContext* opCtx, Counter reloadIteration);
// All member variables are labeled with one of the following codes indicating the
// synchronization rules for accessing them.
diff --git a/src/mongo/s/config_server_client.cpp b/src/mongo/s/config_server_client.cpp
index d3dd7f88dea..bf290d58199 100644
--- a/src/mongo/s/config_server_client.cpp
+++ b/src/mongo/s/config_server_client.cpp
@@ -43,16 +43,16 @@ const ReadPreferenceSetting kPrimaryOnlyReadPreference{ReadPreference::PrimaryOn
} // namespace
-Status moveChunk(OperationContext* txn,
+Status moveChunk(OperationContext* opCtx,
const ChunkType& chunk,
const ShardId& newShardId,
int64_t maxChunkSizeBytes,
const MigrationSecondaryThrottleOptions& secondaryThrottle,
bool waitForDelete) {
- auto shardRegistry = Grid::get(txn)->shardRegistry();
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
auto shard = shardRegistry->getConfigShard();
auto cmdResponseStatus = shard->runCommand(
- txn,
+ opCtx,
kPrimaryOnlyReadPreference,
"admin",
BalanceChunkRequest::serializeToMoveCommandForConfig(
@@ -65,11 +65,11 @@ Status moveChunk(OperationContext* txn,
return cmdResponseStatus.getValue().commandStatus;
}
-Status rebalanceChunk(OperationContext* txn, const ChunkType& chunk) {
- auto shardRegistry = Grid::get(txn)->shardRegistry();
+Status rebalanceChunk(OperationContext* opCtx, const ChunkType& chunk) {
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
auto shard = shardRegistry->getConfigShard();
auto cmdResponseStatus = shard->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
kPrimaryOnlyReadPreference,
"admin",
BalanceChunkRequest::serializeToRebalanceCommandForConfig(chunk),
diff --git a/src/mongo/s/config_server_client.h b/src/mongo/s/config_server_client.h
index ec92a493aca..4e825ca1408 100644
--- a/src/mongo/s/config_server_client.h
+++ b/src/mongo/s/config_server_client.h
@@ -46,7 +46,7 @@ namespace configsvr_client {
/**
* Requests the balancer to move the specified chunk off of its current shard to the new shard.
*/
-Status moveChunk(OperationContext* txn,
+Status moveChunk(OperationContext* opCtx,
const ChunkType& chunk,
const ShardId& newShardId,
int64_t maxChunkSizeBytes,
@@ -57,7 +57,7 @@ Status moveChunk(OperationContext* txn,
* Requests the balancer to move the specified chunk off of its current shard to a shard, considered
* more appropriate under the balancing policy which is currently in effect.
*/
-Status rebalanceChunk(OperationContext* txn, const ChunkType& chunk);
+Status rebalanceChunk(OperationContext* opCtx, const ChunkType& chunk);
} // namespace configsvr_client
} // namespace mongo
diff --git a/src/mongo/s/config_server_test_fixture.cpp b/src/mongo/s/config_server_test_fixture.cpp
index 65e8dec3fe8..4f1e6246646 100644
--- a/src/mongo/s/config_server_test_fixture.cpp
+++ b/src/mongo/s/config_server_test_fixture.cpp
@@ -174,7 +174,7 @@ std::shared_ptr<Shard> ConfigServerTestFixture::getConfigShard() const {
return shardRegistry()->getConfigShard();
}
-Status ConfigServerTestFixture::insertToConfigCollection(OperationContext* txn,
+Status ConfigServerTestFixture::insertToConfigCollection(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& doc) {
auto insert(stdx::make_unique<BatchedInsertRequest>());
@@ -186,7 +186,7 @@ Status ConfigServerTestFixture::insertToConfigCollection(OperationContext* txn,
auto config = getConfigShard();
invariant(config);
- auto insertResponse = config->runCommand(txn,
+ auto insertResponse = config->runCommand(opCtx,
kReadPref,
ns.db().toString(),
request.toBSON(),
@@ -198,14 +198,14 @@ Status ConfigServerTestFixture::insertToConfigCollection(OperationContext* txn,
return status;
}
-StatusWith<BSONObj> ConfigServerTestFixture::findOneOnConfigCollection(OperationContext* txn,
+StatusWith<BSONObj> ConfigServerTestFixture::findOneOnConfigCollection(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& filter) {
auto config = getConfigShard();
invariant(config);
auto findStatus = config->exhaustiveFindOnConfig(
- txn, kReadPref, repl::ReadConcernLevel::kMajorityReadConcern, ns, filter, BSONObj(), 1);
+ opCtx, kReadPref, repl::ReadConcernLevel::kMajorityReadConcern, ns, filter, BSONObj(), 1);
if (!findStatus.isOK()) {
return findStatus.getStatus();
}
@@ -231,10 +231,10 @@ Status ConfigServerTestFixture::setupShards(const std::vector<ShardType>& shards
return Status::OK();
}
-StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* txn,
+StatusWith<ShardType> ConfigServerTestFixture::getShardDoc(OperationContext* opCtx,
const std::string& shardId) {
auto doc = findOneOnConfigCollection(
- txn, NamespaceString(ShardType::ConfigNS), BSON(ShardType::name(shardId)));
+ opCtx, NamespaceString(ShardType::ConfigNS), BSON(ShardType::name(shardId)));
if (!doc.isOK()) {
if (doc.getStatus() == ErrorCodes::NoMatchingDocument) {
return {ErrorCodes::ShardNotFound,
@@ -258,21 +258,21 @@ Status ConfigServerTestFixture::setupChunks(const std::vector<ChunkType>& chunks
return Status::OK();
}
-StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(OperationContext* txn,
+StatusWith<ChunkType> ConfigServerTestFixture::getChunkDoc(OperationContext* opCtx,
const BSONObj& minKey) {
auto doc = findOneOnConfigCollection(
- txn, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::min() << minKey));
+ opCtx, NamespaceString(ChunkType::ConfigNS), BSON(ChunkType::min() << minKey));
if (!doc.isOK())
return doc.getStatus();
return ChunkType::fromConfigBSON(doc.getValue());
}
-StatusWith<std::vector<BSONObj>> ConfigServerTestFixture::getIndexes(OperationContext* txn,
+StatusWith<std::vector<BSONObj>> ConfigServerTestFixture::getIndexes(OperationContext* opCtx,
const NamespaceString& ns) {
auto configShard = getConfigShard();
- auto response = configShard->runCommand(txn,
+ auto response = configShard->runCommand(opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
ns.db().toString(),
BSON("listIndexes" << ns.coll().toString()),
diff --git a/src/mongo/s/config_server_test_fixture.h b/src/mongo/s/config_server_test_fixture.h
index 8d8bafa0c96..411b3672902 100644
--- a/src/mongo/s/config_server_test_fixture.h
+++ b/src/mongo/s/config_server_test_fixture.h
@@ -57,14 +57,14 @@ public:
/**
* Insert a document to this config server to the specified namespace.
*/
- Status insertToConfigCollection(OperationContext* txn,
+ Status insertToConfigCollection(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& doc);
/**
* Reads a single document from a collection living on the config server.
*/
- StatusWith<BSONObj> findOneOnConfigCollection(OperationContext* txn,
+ StatusWith<BSONObj> findOneOnConfigCollection(OperationContext* opCtx,
const NamespaceString& ns,
const BSONObj& filter);
@@ -77,7 +77,7 @@ public:
* Retrieves the shard document from the config server.
* Returns {ErrorCodes::ShardNotFound} if the given shard does not exists.
*/
- StatusWith<ShardType> getShardDoc(OperationContext* txn, const std::string& shardId);
+ StatusWith<ShardType> getShardDoc(OperationContext* opCtx, const std::string& shardId);
/**
* Setup the config.chunks collection to contain the given chunks.
@@ -87,12 +87,12 @@ public:
/**
* Retrieves the chunk document from the config server.
*/
- StatusWith<ChunkType> getChunkDoc(OperationContext* txn, const BSONObj& minKey);
+ StatusWith<ChunkType> getChunkDoc(OperationContext* opCtx, const BSONObj& minKey);
/**
* Returns the indexes definitions defined on a given collection.
*/
- StatusWith<std::vector<BSONObj>> getIndexes(OperationContext* txn, const NamespaceString& ns);
+ StatusWith<std::vector<BSONObj>> getIndexes(OperationContext* opCtx, const NamespaceString& ns);
/**
* Returns the stored raw pointer to the addShard TaskExecutor's NetworkInterface.
diff --git a/src/mongo/s/grid.h b/src/mongo/s/grid.h
index 1562afebba7..4170e3ee152 100644
--- a/src/mongo/s/grid.h
+++ b/src/mongo/s/grid.h
@@ -98,7 +98,7 @@ public:
* Returns a pointer to a ShardingCatalogClient to use for accessing catalog data stored on the
* config servers.
*/
- ShardingCatalogClient* catalogClient(OperationContext* txn) {
+ ShardingCatalogClient* catalogClient(OperationContext* opCtx) {
return _catalogClient.get();
}
diff --git a/src/mongo/s/local_sharding_info.cpp b/src/mongo/s/local_sharding_info.cpp
index 8cea70e5032..7b8926bac65 100644
--- a/src/mongo/s/local_sharding_info.cpp
+++ b/src/mongo/s/local_sharding_info.cpp
@@ -61,10 +61,10 @@ void enableLocalShardingInfo(ServiceContext* context, Handler handler) {
forService(context).registerHandler(handler);
}
-bool haveLocalShardingInfo(OperationContext* txn, const std::string& ns) {
- auto handler = forService(txn->getServiceContext()).getHandler();
+bool haveLocalShardingInfo(OperationContext* opCtx, const std::string& ns) {
+ auto handler = forService(opCtx->getServiceContext()).getHandler();
if (handler)
- return handler(txn, ns);
+ return handler(opCtx, ns);
return false;
}
diff --git a/src/mongo/s/local_sharding_info.h b/src/mongo/s/local_sharding_info.h
index 42be1a0176b..9be721de42b 100644
--- a/src/mongo/s/local_sharding_info.h
+++ b/src/mongo/s/local_sharding_info.h
@@ -48,6 +48,6 @@ void enableLocalShardingInfo(ServiceContext* context,
/**
* @return true if we have any shard info for the ns
*/
-bool haveLocalShardingInfo(OperationContext* txn, const std::string& ns);
+bool haveLocalShardingInfo(OperationContext* opCtx, const std::string& ns);
} // namespace mongo
diff --git a/src/mongo/s/ns_targeter.h b/src/mongo/s/ns_targeter.h
index 62fd02e5e22..95e19a81a53 100644
--- a/src/mongo/s/ns_targeter.h
+++ b/src/mongo/s/ns_targeter.h
@@ -83,7 +83,7 @@ public:
*
* Returns !OK with message if document could not be targeted for other reasons.
*/
- virtual Status targetInsert(OperationContext* txn,
+ virtual Status targetInsert(OperationContext* opCtx,
const BSONObj& doc,
ShardEndpoint** endpoint) const = 0;
@@ -92,7 +92,7 @@ public:
*
* Returns OK and fills the endpoints; returns a status describing the error otherwise.
*/
- virtual Status targetUpdate(OperationContext* txn,
+ virtual Status targetUpdate(OperationContext* opCtx,
const BatchedUpdateDocument& updateDoc,
std::vector<ShardEndpoint*>* endpoints) const = 0;
@@ -101,7 +101,7 @@ public:
*
* Returns OK and fills the endpoints; returns a status describing the error otherwise.
*/
- virtual Status targetDelete(OperationContext* txn,
+ virtual Status targetDelete(OperationContext* opCtx,
const BatchedDeleteDocument& deleteDoc,
std::vector<ShardEndpoint*>* endpoints) const = 0;
@@ -147,7 +147,7 @@ public:
* NOTE: This function may block for shared resources or network calls.
* Returns !OK with message if could not refresh
*/
- virtual Status refreshIfNeeded(OperationContext* txn, bool* wasChanged) = 0;
+ virtual Status refreshIfNeeded(OperationContext* opCtx, bool* wasChanged) = 0;
};
/**
diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp
index afcf5257db0..614002978f1 100644
--- a/src/mongo/s/query/async_results_merger.cpp
+++ b/src/mongo/s/query/async_results_merger.cpp
@@ -253,7 +253,7 @@ ClusterQueryResult AsyncResultsMerger::nextReadyUnsorted() {
return {};
}
-Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* txn, size_t remoteIndex) {
+Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* opCtx, size_t remoteIndex) {
auto& remote = _remotes[remoteIndex];
invariant(!remote.cbHandle.isValid());
@@ -291,14 +291,14 @@ Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* txn, size_t
}
executor::RemoteCommandRequest request(
- remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, _metadataObj, txn);
+ remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, _metadataObj, opCtx);
auto callbackStatus =
_executor->scheduleRemoteCommand(request,
stdx::bind(&AsyncResultsMerger::handleBatchResponse,
this,
stdx::placeholders::_1,
- txn,
+ opCtx,
remoteIndex));
if (!callbackStatus.isOK()) {
return callbackStatus.getStatus();
@@ -317,7 +317,7 @@ Status AsyncResultsMerger::askForNextBatch_inlock(OperationContext* txn, size_t
* 3. Remotes that reached maximum retries will be in 'exhausted' state.
*/
StatusWith<executor::TaskExecutor::EventHandle> AsyncResultsMerger::nextEvent(
- OperationContext* txn) {
+ OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_lifecycleState != kAlive) {
@@ -345,7 +345,7 @@ StatusWith<executor::TaskExecutor::EventHandle> AsyncResultsMerger::nextEvent(
// If we already have established a cursor with this remote, and there is no outstanding
// request for which we have a valid callback handle, then schedule work to retrieve the
// next batch.
- auto nextBatchStatus = askForNextBatch_inlock(txn, i);
+ auto nextBatchStatus = askForNextBatch_inlock(opCtx, i);
if (!nextBatchStatus.isOK()) {
return nextBatchStatus;
}
@@ -391,7 +391,7 @@ StatusWith<CursorResponse> AsyncResultsMerger::parseCursorResponse(const BSONObj
void AsyncResultsMerger::handleBatchResponse(
const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData,
- OperationContext* txn,
+ OperationContext* opCtx,
size_t remoteIndex) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
@@ -426,7 +426,7 @@ void AsyncResultsMerger::handleBatchResponse(
// If the event handle is invalid, then the executor is in the middle of shutting down,
// and we can't schedule any more work for it to complete.
if (_killCursorsScheduledEvent.isValid()) {
- scheduleKillCursors_inlock(txn);
+ scheduleKillCursors_inlock(opCtx);
_executor->signalEvent(_killCursorsScheduledEvent);
}
@@ -571,7 +571,7 @@ void AsyncResultsMerger::handleBatchResponse(
// We do not ask for the next batch if the cursor is tailable, as batches received from remote
// tailable cursors should be passed through to the client without asking for more batches.
if (!_params->isTailable && !remote.hasNext() && !remote.exhausted()) {
- remote.status = askForNextBatch_inlock(txn, remoteIndex);
+ remote.status = askForNextBatch_inlock(opCtx, remoteIndex);
if (!remote.status.isOK()) {
return;
}
@@ -602,7 +602,7 @@ bool AsyncResultsMerger::haveOutstandingBatchRequests_inlock() {
return false;
}
-void AsyncResultsMerger::scheduleKillCursors_inlock(OperationContext* txn) {
+void AsyncResultsMerger::scheduleKillCursors_inlock(OperationContext* opCtx) {
invariant(_lifecycleState == kKillStarted);
invariant(_killCursorsScheduledEvent.isValid());
@@ -613,7 +613,7 @@ void AsyncResultsMerger::scheduleKillCursors_inlock(OperationContext* txn) {
BSONObj cmdObj = KillCursorsRequest(_params->nsString, {*remote.cursorId}).toBSON();
executor::RemoteCommandRequest request(
- remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, txn);
+ remote.getTargetHost(), _params->nsString.db().toString(), cmdObj, opCtx);
_executor->scheduleRemoteCommand(
request,
@@ -627,7 +627,7 @@ void AsyncResultsMerger::handleKillCursorsResponse(
// We just ignore any killCursors command responses.
}
-executor::TaskExecutor::EventHandle AsyncResultsMerger::kill(OperationContext* txn) {
+executor::TaskExecutor::EventHandle AsyncResultsMerger::kill(OperationContext* opCtx) {
stdx::lock_guard<stdx::mutex> lk(_mutex);
if (_killCursorsScheduledEvent.isValid()) {
invariant(_lifecycleState != kAlive);
@@ -653,7 +653,7 @@ executor::TaskExecutor::EventHandle AsyncResultsMerger::kill(OperationContext* t
// remotes now. Otherwise, we have to wait until all responses are back, and then we can kill
// the remote cursors.
if (!haveOutstandingBatchRequests_inlock()) {
- scheduleKillCursors_inlock(txn);
+ scheduleKillCursors_inlock(opCtx);
_lifecycleState = kKillComplete;
_executor->signalEvent(_killCursorsScheduledEvent);
}
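
Editorial note (not part of the patch): AsyncResultsMerger::kill above only schedules killCursors immediately when no batch requests are outstanding; otherwise cleanup is deferred until the last in-flight response arrives in handleBatchResponse. A minimal sketch of that deferred-cleanup shape, with hypothetical names (Merger, handleResponse, _scheduleCleanupLocked):

    #include <mutex>

    class Merger {
    public:
        void kill() {
            std::lock_guard<std::mutex> lk(_mutex);
            _killRequested = true;
            if (_outstandingRequests == 0) {
                _scheduleCleanupLocked();  // nothing in flight: clean up right away
            }
            // otherwise the final handleResponse() call performs the cleanup
        }

        void handleResponse() {
            std::lock_guard<std::mutex> lk(_mutex);
            --_outstandingRequests;
            if (_killRequested && _outstandingRequests == 0) {
                _scheduleCleanupLocked();  // this was the last in-flight request
            }
        }

    private:
        void _scheduleCleanupLocked() { /* e.g. send killCursors to the remote hosts */ }

        std::mutex _mutex;
        int _outstandingRequests = 0;
        bool _killRequested = false;
    };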
diff --git a/src/mongo/s/query/async_results_merger.h b/src/mongo/s/query/async_results_merger.h
index 3252a22bf0e..e6766a4faa3 100644
--- a/src/mongo/s/query/async_results_merger.h
+++ b/src/mongo/s/query/async_results_merger.h
@@ -154,7 +154,7 @@ public:
* the caller should call nextEvent() to retry the request on the hosts that errored. If
* ready() is true, then either the error was not retriable or it has exhausted max retries.
*/
- StatusWith<executor::TaskExecutor::EventHandle> nextEvent(OperationContext* txn);
+ StatusWith<executor::TaskExecutor::EventHandle> nextEvent(OperationContext* opCtx);
/**
* Starts shutting down this ARM. Returns a handle to an event which is signaled when this
@@ -169,7 +169,7 @@ public:
*
* May be called multiple times (idempotent).
*/
- executor::TaskExecutor::EventHandle kill(OperationContext* txn);
+ executor::TaskExecutor::EventHandle kill(OperationContext* opCtx);
private:
/**
@@ -291,7 +291,7 @@ private:
*
* Returns success if the command to retrieve the next batch was scheduled successfully.
*/
- Status askForNextBatch_inlock(OperationContext* txn, size_t remoteIndex);
+ Status askForNextBatch_inlock(OperationContext* opCtx, size_t remoteIndex);
/**
* Checks whether or not the remote cursors are all exhausted.
@@ -322,7 +322,7 @@ private:
* buffered.
*/
void handleBatchResponse(const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData,
- OperationContext* txn,
+ OperationContext* opCtx,
size_t remoteIndex);
/**
@@ -342,7 +342,7 @@ private:
/**
* Schedules a killCursors command to be run on all remote hosts that have open cursors.
*/
- void scheduleKillCursors_inlock(OperationContext* txn);
+ void scheduleKillCursors_inlock(OperationContext* opCtx);
// Not owned here.
executor::TaskExecutor* _executor;
diff --git a/src/mongo/s/query/cluster_client_cursor.h b/src/mongo/s/query/cluster_client_cursor.h
index bea7bbba7aa..bd34689e62f 100644
--- a/src/mongo/s/query/cluster_client_cursor.h
+++ b/src/mongo/s/query/cluster_client_cursor.h
@@ -64,7 +64,7 @@ public:
*
* A non-ok status is returned in case of any error.
*/
- virtual StatusWith<ClusterQueryResult> next(OperationContext* txn) = 0;
+ virtual StatusWith<ClusterQueryResult> next(OperationContext* opCtx) = 0;
/**
* Must be called before destruction to abandon a not-yet-exhausted cursor. If next() has
@@ -72,7 +72,7 @@ public:
*
* May block waiting for responses from remote hosts.
*/
- virtual void kill(OperationContext* txn) = 0;
+ virtual void kill(OperationContext* opCtx) = 0;
/**
* Returns whether or not this cursor is tailing a capped collection on a shard.
diff --git a/src/mongo/s/query/cluster_client_cursor_impl.cpp b/src/mongo/s/query/cluster_client_cursor_impl.cpp
index 9f3157651b8..24ffc0b220a 100644
--- a/src/mongo/s/query/cluster_client_cursor_impl.cpp
+++ b/src/mongo/s/query/cluster_client_cursor_impl.cpp
@@ -41,13 +41,13 @@
namespace mongo {
-ClusterClientCursorGuard::ClusterClientCursorGuard(OperationContext* txn,
+ClusterClientCursorGuard::ClusterClientCursorGuard(OperationContext* opCtx,
std::unique_ptr<ClusterClientCursor> ccc)
- : _txn(txn), _ccc(std::move(ccc)) {}
+ : _opCtx(opCtx), _ccc(std::move(ccc)) {}
ClusterClientCursorGuard::~ClusterClientCursorGuard() {
if (_ccc && !_ccc->remotesExhausted()) {
- _ccc->kill(_txn);
+ _ccc->kill(_opCtx);
}
}
@@ -59,12 +59,12 @@ std::unique_ptr<ClusterClientCursor> ClusterClientCursorGuard::releaseCursor() {
return std::move(_ccc);
}
-ClusterClientCursorGuard ClusterClientCursorImpl::make(OperationContext* txn,
+ClusterClientCursorGuard ClusterClientCursorImpl::make(OperationContext* opCtx,
executor::TaskExecutor* executor,
ClusterClientCursorParams&& params) {
std::unique_ptr<ClusterClientCursor> cursor(
new ClusterClientCursorImpl(executor, std::move(params)));
- return ClusterClientCursorGuard(txn, std::move(cursor));
+ return ClusterClientCursorGuard(opCtx, std::move(cursor));
}
ClusterClientCursorImpl::ClusterClientCursorImpl(executor::TaskExecutor* executor,
@@ -75,7 +75,7 @@ ClusterClientCursorImpl::ClusterClientCursorImpl(std::unique_ptr<RouterStageMock
ClusterClientCursorParams&& params)
: _params(std::move(params)), _root(std::move(root)) {}
-StatusWith<ClusterQueryResult> ClusterClientCursorImpl::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> ClusterClientCursorImpl::next(OperationContext* opCtx) {
// First return stashed results, if there are any.
if (!_stash.empty()) {
auto front = std::move(_stash.front());
@@ -84,15 +84,15 @@ StatusWith<ClusterQueryResult> ClusterClientCursorImpl::next(OperationContext* t
return {front};
}
- auto next = _root->next(txn);
+ auto next = _root->next(opCtx);
if (next.isOK() && !next.getValue().isEOF()) {
++_numReturnedSoFar;
}
return next;
}
-void ClusterClientCursorImpl::kill(OperationContext* txn) {
- _root->kill(txn);
+void ClusterClientCursorImpl::kill(OperationContext* opCtx) {
+ _root->kill(opCtx);
}
bool ClusterClientCursorImpl::isTailable() const {
diff --git a/src/mongo/s/query/cluster_client_cursor_impl.h b/src/mongo/s/query/cluster_client_cursor_impl.h
index 929cf655849..de4e09d0950 100644
--- a/src/mongo/s/query/cluster_client_cursor_impl.h
+++ b/src/mongo/s/query/cluster_client_cursor_impl.h
@@ -50,7 +50,7 @@ class ClusterClientCursorGuard final {
MONGO_DISALLOW_COPYING(ClusterClientCursorGuard);
public:
- ClusterClientCursorGuard(OperationContext* txn, std::unique_ptr<ClusterClientCursor> ccc);
+ ClusterClientCursorGuard(OperationContext* opCtx, std::unique_ptr<ClusterClientCursor> ccc);
/**
* If a cursor is owned, safely destroys the cursor, cleaning up remote cursor state if
@@ -74,7 +74,7 @@ public:
std::unique_ptr<ClusterClientCursor> releaseCursor();
private:
- OperationContext* _txn;
+ OperationContext* _opCtx;
std::unique_ptr<ClusterClientCursor> _ccc;
};
@@ -85,7 +85,7 @@ public:
/**
* Constructs a CCC whose safe cleanup is ensured by an RAII object.
*/
- static ClusterClientCursorGuard make(OperationContext* txn,
+ static ClusterClientCursorGuard make(OperationContext* opCtx,
executor::TaskExecutor* executor,
ClusterClientCursorParams&& params);
@@ -95,9 +95,9 @@ public:
ClusterClientCursorImpl(std::unique_ptr<RouterStageMock> root,
ClusterClientCursorParams&& params);
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool isTailable() const final;
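
Editorial note (not part of the patch): ClusterClientCursorGuard above is why the rename has to carry the OperationContext into the guard's member (_txn becomes _opCtx): the guard needs a context at destruction time to kill a cursor that was never released. A generic sketch of that RAII shape, with hypothetical stand-ins (Context, Cursor, CursorGuard):

    #include <memory>
    #include <utility>

    struct Context {};
    struct Cursor {
        bool exhausted = false;
        void kill(Context*) { /* release any remote cursor state */ }
    };

    class CursorGuard {
    public:
        CursorGuard(Context* ctx, std::unique_ptr<Cursor> cursor)
            : _ctx(ctx), _cursor(std::move(cursor)) {}

        ~CursorGuard() {
            // Clean up only if the cursor is still owned and still holds remote state.
            if (_cursor && !_cursor->exhausted) {
                _cursor->kill(_ctx);
            }
        }

        std::unique_ptr<Cursor> releaseCursor() {
            return std::move(_cursor);  // transfers ownership; the destructor then no-ops
        }

    private:
        Context* _ctx;
        std::unique_ptr<Cursor> _cursor;
    };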
diff --git a/src/mongo/s/query/cluster_client_cursor_mock.cpp b/src/mongo/s/query/cluster_client_cursor_mock.cpp
index 533773742e7..28a4f2643f3 100644
--- a/src/mongo/s/query/cluster_client_cursor_mock.cpp
+++ b/src/mongo/s/query/cluster_client_cursor_mock.cpp
@@ -43,7 +43,7 @@ ClusterClientCursorMock::~ClusterClientCursorMock() {
invariant(_exhausted || _killed);
}
-StatusWith<ClusterQueryResult> ClusterClientCursorMock::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> ClusterClientCursorMock::next(OperationContext* opCtx) {
invariant(!_killed);
if (_resultsQueue.empty()) {
@@ -66,7 +66,7 @@ long long ClusterClientCursorMock::getNumReturnedSoFar() const {
return _numReturnedSoFar;
}
-void ClusterClientCursorMock::kill(OperationContext* txn) {
+void ClusterClientCursorMock::kill(OperationContext* opCtx) {
_killed = true;
if (_killCallback) {
_killCallback();
diff --git a/src/mongo/s/query/cluster_client_cursor_mock.h b/src/mongo/s/query/cluster_client_cursor_mock.h
index 7011911ce67..baea6660535 100644
--- a/src/mongo/s/query/cluster_client_cursor_mock.h
+++ b/src/mongo/s/query/cluster_client_cursor_mock.h
@@ -43,9 +43,9 @@ public:
~ClusterClientCursorMock();
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool isTailable() const final;
diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp
index 2b4e68ac2cf..85d396490c6 100644
--- a/src/mongo/s/query/cluster_cursor_manager.cpp
+++ b/src/mongo/s/query/cluster_cursor_manager.cpp
@@ -110,9 +110,9 @@ ClusterCursorManager::PinnedCursor& ClusterCursorManager::PinnedCursor::operator
return *this;
}
-StatusWith<ClusterQueryResult> ClusterCursorManager::PinnedCursor::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> ClusterCursorManager::PinnedCursor::next(OperationContext* opCtx) {
invariant(_cursor);
- return _cursor->next(txn);
+ return _cursor->next(opCtx);
}
bool ClusterCursorManager::PinnedCursor::isTailable() const {
@@ -187,7 +187,7 @@ void ClusterCursorManager::shutdown() {
}
StatusWith<CursorId> ClusterCursorManager::registerCursor(
- OperationContext* txn,
+ OperationContext* opCtx,
std::unique_ptr<ClusterClientCursor> cursor,
const NamespaceString& nss,
CursorType cursorType,
@@ -199,7 +199,7 @@ StatusWith<CursorId> ClusterCursorManager::registerCursor(
if (_inShutdown) {
lk.unlock();
- cursor->kill(txn);
+ cursor->kill(opCtx);
return Status(ErrorCodes::ShutdownInProgress,
"Cannot register new cursors as we are in the process of shutting down");
}
@@ -246,7 +246,7 @@ StatusWith<CursorId> ClusterCursorManager::registerCursor(
}
StatusWith<ClusterCursorManager::PinnedCursor> ClusterCursorManager::checkOutCursor(
- const NamespaceString& nss, CursorId cursorId, OperationContext* txn) {
+ const NamespaceString& nss, CursorId cursorId, OperationContext* opCtx) {
// Read the clock out of the lock.
const auto now = _clockSource->now();
diff --git a/src/mongo/s/query/cluster_cursor_manager.h b/src/mongo/s/query/cluster_cursor_manager.h
index 6126ef0757e..ad320452b3b 100644
--- a/src/mongo/s/query/cluster_cursor_manager.h
+++ b/src/mongo/s/query/cluster_cursor_manager.h
@@ -154,7 +154,7 @@ public:
*
* Can block.
*/
- StatusWith<ClusterQueryResult> next(OperationContext* txn);
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx);
/**
* Returns whether or not the underlying cursor is tailing a capped collection. Cannot be
@@ -261,7 +261,7 @@ public:
*
* Does not block.
*/
- StatusWith<CursorId> registerCursor(OperationContext* txn,
+ StatusWith<CursorId> registerCursor(OperationContext* opCtx,
std::unique_ptr<ClusterClientCursor> cursor,
const NamespaceString& nss,
CursorType cursorType,
@@ -282,7 +282,7 @@ public:
*/
StatusWith<PinnedCursor> checkOutCursor(const NamespaceString& nss,
CursorId cursorId,
- OperationContext* txn);
+ OperationContext* opCtx);
/**
* Informs the manager that the given cursor should be killed. The cursor need not necessarily
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index c8790a8cf83..d944954635a 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -149,14 +149,14 @@ StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(const QueryReq
return std::move(newQR);
}
-StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
+StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* opCtx,
const CanonicalQuery& query,
const ReadPreferenceSetting& readPref,
ChunkManager* chunkManager,
std::shared_ptr<Shard> primary,
std::vector<BSONObj>* results,
BSONObj* viewDefinition) {
- auto shardRegistry = Grid::get(txn)->shardRegistry();
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
// Get the set of shards on which we will run the query.
std::vector<std::shared_ptr<Shard>> shards;
@@ -166,13 +166,13 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
invariant(chunkManager);
std::set<ShardId> shardIds;
- chunkManager->getShardIdsForQuery(txn,
+ chunkManager->getShardIdsForQuery(opCtx,
query.getQueryRequest().getFilter(),
query.getQueryRequest().getCollation(),
&shardIds);
for (auto id : shardIds) {
- auto shardStatus = shardRegistry->getShard(txn, id);
+ auto shardStatus = shardRegistry->getShard(opCtx, id);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
@@ -231,12 +231,12 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
}
auto ccc = ClusterClientCursorImpl::make(
- txn, Grid::get(txn)->getExecutorPool()->getArbitraryExecutor(), std::move(params));
+ opCtx, Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(), std::move(params));
auto cursorState = ClusterCursorManager::CursorState::NotExhausted;
int bytesBuffered = 0;
while (!FindCommon::enoughForFirstBatch(query.getQueryRequest(), results->size())) {
- auto next = ccc->next(txn);
+ auto next = ccc->next(opCtx);
if (!next.isOK()) {
if (viewDefinition &&
@@ -289,21 +289,21 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
}
// Register the cursor with the cursor manager.
- auto cursorManager = Grid::get(txn)->getCursorManager();
+ auto cursorManager = Grid::get(opCtx)->getCursorManager();
const auto cursorType = chunkManager ? ClusterCursorManager::CursorType::NamespaceSharded
: ClusterCursorManager::CursorType::NamespaceNotSharded;
const auto cursorLifetime = query.getQueryRequest().isNoCursorTimeout()
? ClusterCursorManager::CursorLifetime::Immortal
: ClusterCursorManager::CursorLifetime::Mortal;
return cursorManager->registerCursor(
- txn, ccc.releaseCursor(), query.nss(), cursorType, cursorLifetime);
+ opCtx, ccc.releaseCursor(), query.nss(), cursorType, cursorLifetime);
}
} // namespace
const size_t ClusterFind::kMaxStaleConfigRetries = 10;
-StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
+StatusWith<CursorId> ClusterFind::runQuery(OperationContext* opCtx,
const CanonicalQuery& query,
const ReadPreferenceSetting& readPref,
std::vector<BSONObj>* results,
@@ -322,7 +322,7 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
// Re-target and re-send the initial find command to the shards until we have established the
// shard version.
for (size_t retries = 1; retries <= kMaxStaleConfigRetries; ++retries) {
- auto scopedCMStatus = ScopedChunkManager::get(txn, query.nss());
+ auto scopedCMStatus = ScopedChunkManager::get(opCtx, query.nss());
if (scopedCMStatus == ErrorCodes::NamespaceNotFound) {
// If the database doesn't exist, we successfully return an empty result set without
// creating a cursor.
@@ -333,8 +333,13 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
const auto& scopedCM = scopedCMStatus.getValue();
- auto cursorId = runQueryWithoutRetrying(
- txn, query, readPref, scopedCM.cm().get(), scopedCM.primary(), results, viewDefinition);
+ auto cursorId = runQueryWithoutRetrying(opCtx,
+ query,
+ readPref,
+ scopedCM.cm().get(),
+ scopedCM.primary(),
+ results,
+ viewDefinition);
if (cursorId.isOK()) {
return cursorId;
}
@@ -353,9 +358,9 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
<< redact(status);
if (status == ErrorCodes::StaleEpoch) {
- Grid::get(txn)->catalogCache()->invalidate(query.nss().db().toString());
+ Grid::get(opCtx)->catalogCache()->invalidate(query.nss().db().toString());
} else {
- scopedCM.db()->getChunkManagerIfExists(txn, query.nss().ns(), true);
+ scopedCM.db()->getChunkManagerIfExists(opCtx, query.nss().ns(), true);
}
}
@@ -364,11 +369,11 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
<< " times without successfully establishing shard version."};
}
-StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* txn,
+StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* opCtx,
const GetMoreRequest& request) {
- auto cursorManager = Grid::get(txn)->getCursorManager();
+ auto cursorManager = Grid::get(opCtx)->getCursorManager();
- auto pinnedCursor = cursorManager->checkOutCursor(request.nss, request.cursorid, txn);
+ auto pinnedCursor = cursorManager->checkOutCursor(request.nss, request.cursorid, opCtx);
if (!pinnedCursor.isOK()) {
return pinnedCursor.getStatus();
}
@@ -391,7 +396,7 @@ StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* txn,
long long startingFrom = pinnedCursor.getValue().getNumReturnedSoFar();
auto cursorState = ClusterCursorManager::CursorState::NotExhausted;
while (!FindCommon::enoughForGetMore(batchSize, batch.size())) {
- auto next = pinnedCursor.getValue().next(txn);
+ auto next = pinnedCursor.getValue().next(opCtx);
if (!next.isOK()) {
return next.getStatus();
}
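
Editorial note (not part of the patch): ClusterFind::runQuery above retries the whole targeting-plus-query sequence up to kMaxStaleConfigRetries times, refreshing or invalidating routing information between attempts. A bounded-retry sketch of that control flow; tryOnce, refreshRouting and runWithRetries are hypothetical placeholders, not MongoDB functions:

    #include <cstddef>
    #include <optional>

    struct Result { long long cursorId = 0; };

    // Placeholder helpers standing in for the real targeting and refresh logic.
    std::optional<Result> tryOnce(bool& staleRouting) {
        staleRouting = false;
        return Result{};  // pretend this attempt succeeded
    }
    void refreshRouting() { /* invalidate and reload routing info */ }

    std::optional<Result> runWithRetries(std::size_t maxRetries) {
        for (std::size_t attempt = 1; attempt <= maxRetries; ++attempt) {
            bool stale = false;
            if (auto result = tryOnce(stale)) {
                return result;        // success on this attempt
            }
            if (!stale) {
                return std::nullopt;  // non-retriable error: give up immediately
            }
            refreshRouting();         // stale routing info: refresh, then retry
        }
        return std::nullopt;          // exhausted the retry budget
    }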
diff --git a/src/mongo/s/query/cluster_find.h b/src/mongo/s/query/cluster_find.h
index 22d7ad89b04..5a011d27958 100644
--- a/src/mongo/s/query/cluster_find.h
+++ b/src/mongo/s/query/cluster_find.h
@@ -66,7 +66,7 @@ public:
* If a CommandOnShardedViewNotSupportedOnMongod error is returned, then 'viewDefinition', if
* not null, will contain a view definition.
*/
- static StatusWith<CursorId> runQuery(OperationContext* txn,
+ static StatusWith<CursorId> runQuery(OperationContext* opCtx,
const CanonicalQuery& query,
const ReadPreferenceSetting& readPref,
std::vector<BSONObj>* results,
@@ -75,7 +75,7 @@ public:
/**
* Executes the getMore request 'request', and on success returns a CursorResponse.
*/
- static StatusWith<CursorResponse> runGetMore(OperationContext* txn,
+ static StatusWith<CursorResponse> runGetMore(OperationContext* opCtx,
const GetMoreRequest& request);
/**
diff --git a/src/mongo/s/query/router_exec_stage.h b/src/mongo/s/query/router_exec_stage.h
index 5fcb6053e58..f6128a53e43 100644
--- a/src/mongo/s/query/router_exec_stage.h
+++ b/src/mongo/s/query/router_exec_stage.h
@@ -66,13 +66,13 @@ public:
* holding on to a subset of the returned results and need to minimize memory usage, call copy()
* on the BSONObjs.
*/
- virtual StatusWith<ClusterQueryResult> next(OperationContext* txn) = 0;
+ virtual StatusWith<ClusterQueryResult> next(OperationContext* opCtx) = 0;
/**
* Must be called before destruction to abandon a not-yet-exhausted plan. May block waiting for
* responses from remote hosts.
*/
- virtual void kill(OperationContext* txn) = 0;
+ virtual void kill(OperationContext* opCtx) = 0;
/**
* Returns whether or not all the remote cursors are exhausted.
diff --git a/src/mongo/s/query/router_stage_limit.cpp b/src/mongo/s/query/router_stage_limit.cpp
index 4a1a428a533..ea90251eef6 100644
--- a/src/mongo/s/query/router_stage_limit.cpp
+++ b/src/mongo/s/query/router_stage_limit.cpp
@@ -39,12 +39,12 @@ RouterStageLimit::RouterStageLimit(std::unique_ptr<RouterExecStage> child, long
invariant(limit > 0);
}
-StatusWith<ClusterQueryResult> RouterStageLimit::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> RouterStageLimit::next(OperationContext* opCtx) {
if (_returnedSoFar >= _limit) {
return {ClusterQueryResult()};
}
- auto childResult = getChildStage()->next(txn);
+ auto childResult = getChildStage()->next(opCtx);
if (!childResult.isOK()) {
return childResult;
}
@@ -55,8 +55,8 @@ StatusWith<ClusterQueryResult> RouterStageLimit::next(OperationContext* txn) {
return childResult;
}
-void RouterStageLimit::kill(OperationContext* txn) {
- getChildStage()->kill(txn);
+void RouterStageLimit::kill(OperationContext* opCtx) {
+ getChildStage()->kill(opCtx);
}
bool RouterStageLimit::remotesExhausted() {
diff --git a/src/mongo/s/query/router_stage_limit.h b/src/mongo/s/query/router_stage_limit.h
index 29fb85dd458..42223902cc1 100644
--- a/src/mongo/s/query/router_stage_limit.h
+++ b/src/mongo/s/query/router_stage_limit.h
@@ -39,9 +39,9 @@ class RouterStageLimit final : public RouterExecStage {
public:
RouterStageLimit(std::unique_ptr<RouterExecStage> child, long long limit);
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool remotesExhausted() final;
diff --git a/src/mongo/s/query/router_stage_merge.cpp b/src/mongo/s/query/router_stage_merge.cpp
index e66aaf91fc4..90a80e7161b 100644
--- a/src/mongo/s/query/router_stage_merge.cpp
+++ b/src/mongo/s/query/router_stage_merge.cpp
@@ -40,9 +40,9 @@ RouterStageMerge::RouterStageMerge(executor::TaskExecutor* executor,
ClusterClientCursorParams* params)
: _executor(executor), _arm(executor, params) {}
-StatusWith<ClusterQueryResult> RouterStageMerge::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> RouterStageMerge::next(OperationContext* opCtx) {
while (!_arm.ready()) {
- auto nextEventStatus = _arm.nextEvent(txn);
+ auto nextEventStatus = _arm.nextEvent(opCtx);
if (!nextEventStatus.isOK()) {
return nextEventStatus.getStatus();
}
@@ -55,8 +55,8 @@ StatusWith<ClusterQueryResult> RouterStageMerge::next(OperationContext* txn) {
return _arm.nextReady();
}
-void RouterStageMerge::kill(OperationContext* txn) {
- auto killEvent = _arm.kill(txn);
+void RouterStageMerge::kill(OperationContext* opCtx) {
+ auto killEvent = _arm.kill(opCtx);
if (!killEvent) {
// Mongos is shutting down.
return;
diff --git a/src/mongo/s/query/router_stage_merge.h b/src/mongo/s/query/router_stage_merge.h
index 58a8061355e..428a405b401 100644
--- a/src/mongo/s/query/router_stage_merge.h
+++ b/src/mongo/s/query/router_stage_merge.h
@@ -45,9 +45,9 @@ class RouterStageMerge final : public RouterExecStage {
public:
RouterStageMerge(executor::TaskExecutor* executor, ClusterClientCursorParams* params);
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool remotesExhausted() final;
diff --git a/src/mongo/s/query/router_stage_mock.cpp b/src/mongo/s/query/router_stage_mock.cpp
index c348018fe6f..e134340713a 100644
--- a/src/mongo/s/query/router_stage_mock.cpp
+++ b/src/mongo/s/query/router_stage_mock.cpp
@@ -50,7 +50,7 @@ void RouterStageMock::markRemotesExhausted() {
_remotesExhausted = true;
}
-StatusWith<ClusterQueryResult> RouterStageMock::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> RouterStageMock::next(OperationContext* opCtx) {
if (_resultsQueue.empty()) {
return {ClusterQueryResult()};
}
@@ -60,7 +60,7 @@ StatusWith<ClusterQueryResult> RouterStageMock::next(OperationContext* txn) {
return out;
}
-void RouterStageMock::kill(OperationContext* txn) {
+void RouterStageMock::kill(OperationContext* opCtx) {
// No child to kill.
}
diff --git a/src/mongo/s/query/router_stage_mock.h b/src/mongo/s/query/router_stage_mock.h
index dce077d8122..7cba32a81f6 100644
--- a/src/mongo/s/query/router_stage_mock.h
+++ b/src/mongo/s/query/router_stage_mock.h
@@ -44,9 +44,9 @@ class RouterStageMock final : public RouterExecStage {
public:
~RouterStageMock() final {}
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool remotesExhausted() final;
diff --git a/src/mongo/s/query/router_stage_remove_sortkey.cpp b/src/mongo/s/query/router_stage_remove_sortkey.cpp
index 9c58e489b13..9cb1e4d26c9 100644
--- a/src/mongo/s/query/router_stage_remove_sortkey.cpp
+++ b/src/mongo/s/query/router_stage_remove_sortkey.cpp
@@ -41,8 +41,8 @@ namespace mongo {
RouterStageRemoveSortKey::RouterStageRemoveSortKey(std::unique_ptr<RouterExecStage> child)
: RouterExecStage(std::move(child)) {}
-StatusWith<ClusterQueryResult> RouterStageRemoveSortKey::next(OperationContext* txn) {
- auto childResult = getChildStage()->next(txn);
+StatusWith<ClusterQueryResult> RouterStageRemoveSortKey::next(OperationContext* opCtx) {
+ auto childResult = getChildStage()->next(opCtx);
if (!childResult.isOK() || !childResult.getValue().getResult()) {
return childResult;
}
@@ -59,8 +59,8 @@ StatusWith<ClusterQueryResult> RouterStageRemoveSortKey::next(OperationContext*
return {builder.obj()};
}
-void RouterStageRemoveSortKey::kill(OperationContext* txn) {
- getChildStage()->kill(txn);
+void RouterStageRemoveSortKey::kill(OperationContext* opCtx) {
+ getChildStage()->kill(opCtx);
}
bool RouterStageRemoveSortKey::remotesExhausted() {
diff --git a/src/mongo/s/query/router_stage_remove_sortkey.h b/src/mongo/s/query/router_stage_remove_sortkey.h
index 291cf01a803..e3599a3e9b0 100644
--- a/src/mongo/s/query/router_stage_remove_sortkey.h
+++ b/src/mongo/s/query/router_stage_remove_sortkey.h
@@ -41,9 +41,9 @@ class RouterStageRemoveSortKey final : public RouterExecStage {
public:
RouterStageRemoveSortKey(std::unique_ptr<RouterExecStage> child);
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool remotesExhausted() final;
diff --git a/src/mongo/s/query/router_stage_skip.cpp b/src/mongo/s/query/router_stage_skip.cpp
index a6bec5c8733..6763ca5808b 100644
--- a/src/mongo/s/query/router_stage_skip.cpp
+++ b/src/mongo/s/query/router_stage_skip.cpp
@@ -39,9 +39,9 @@ RouterStageSkip::RouterStageSkip(std::unique_ptr<RouterExecStage> child, long lo
invariant(skip > 0);
}
-StatusWith<ClusterQueryResult> RouterStageSkip::next(OperationContext* txn) {
+StatusWith<ClusterQueryResult> RouterStageSkip::next(OperationContext* opCtx) {
while (_skippedSoFar < _skip) {
- auto next = getChildStage()->next(txn);
+ auto next = getChildStage()->next(opCtx);
if (!next.isOK()) {
return next;
}
@@ -53,11 +53,11 @@ StatusWith<ClusterQueryResult> RouterStageSkip::next(OperationContext* txn) {
++_skippedSoFar;
}
- return getChildStage()->next(txn);
+ return getChildStage()->next(opCtx);
}
-void RouterStageSkip::kill(OperationContext* txn) {
- getChildStage()->kill(txn);
+void RouterStageSkip::kill(OperationContext* opCtx) {
+ getChildStage()->kill(opCtx);
}
bool RouterStageSkip::remotesExhausted() {
diff --git a/src/mongo/s/query/router_stage_skip.h b/src/mongo/s/query/router_stage_skip.h
index c949271f79e..773220d4fe6 100644
--- a/src/mongo/s/query/router_stage_skip.h
+++ b/src/mongo/s/query/router_stage_skip.h
@@ -39,9 +39,9 @@ class RouterStageSkip final : public RouterExecStage {
public:
RouterStageSkip(std::unique_ptr<RouterExecStage> child, long long skip);
- StatusWith<ClusterQueryResult> next(OperationContext* txn) final;
+ StatusWith<ClusterQueryResult> next(OperationContext* opCtx) final;
- void kill(OperationContext* txn) final;
+ void kill(OperationContext* opCtx) final;
bool remotesExhausted() final;
diff --git a/src/mongo/s/query/store_possible_cursor.cpp b/src/mongo/s/query/store_possible_cursor.cpp
index 1e3b7d03306..8647871b6a7 100644
--- a/src/mongo/s/query/store_possible_cursor.cpp
+++ b/src/mongo/s/query/store_possible_cursor.cpp
@@ -39,7 +39,7 @@
namespace mongo {
-StatusWith<BSONObj> storePossibleCursor(OperationContext* txn,
+StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,
const HostAndPort& server,
const BSONObj& cmdResult,
const NamespaceString& requestedNss,
@@ -62,10 +62,10 @@ StatusWith<BSONObj> storePossibleCursor(OperationContext* txn,
params.remotes.emplace_back(server, incomingCursorResponse.getValue().getCursorId());
- auto ccc = ClusterClientCursorImpl::make(txn, executor, std::move(params));
+ auto ccc = ClusterClientCursorImpl::make(opCtx, executor, std::move(params));
auto clusterCursorId =
- cursorManager->registerCursor(txn,
+ cursorManager->registerCursor(opCtx,
ccc.releaseCursor(),
requestedNss,
ClusterCursorManager::CursorType::NamespaceNotSharded,
diff --git a/src/mongo/s/query/store_possible_cursor.h b/src/mongo/s/query/store_possible_cursor.h
index f06c959b41c..03d61ac4d33 100644
--- a/src/mongo/s/query/store_possible_cursor.h
+++ b/src/mongo/s/query/store_possible_cursor.h
@@ -57,7 +57,7 @@ class TaskExecutor;
* BSONObj response document describing the newly-created cursor, which is suitable for returning to
* the client.
*/
-StatusWith<BSONObj> storePossibleCursor(OperationContext* txn,
+StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,
const HostAndPort& server,
const BSONObj& cmdResult,
const NamespaceString& requestedNss,
diff --git a/src/mongo/s/s_sharding_server_status.cpp b/src/mongo/s/s_sharding_server_status.cpp
index 5be289d5d18..3b91159b110 100644
--- a/src/mongo/s/s_sharding_server_status.cpp
+++ b/src/mongo/s/s_sharding_server_status.cpp
@@ -45,16 +45,16 @@ public:
return true;
}
- BSONObj generateSection(OperationContext* txn,
+ BSONObj generateSection(OperationContext* opCtx,
const BSONElement& configElement) const override {
- auto shardRegistry = Grid::get(txn)->shardRegistry();
+ auto shardRegistry = Grid::get(opCtx)->shardRegistry();
invariant(shardRegistry);
BSONObjBuilder result;
result.append("configsvrConnectionString",
shardRegistry->getConfigServerConnectionString().toString());
- Grid::get(txn)->configOpTime().append(&result, "lastSeenConfigServerOpTime");
+ Grid::get(opCtx)->configOpTime().append(&result, "lastSeenConfigServerOpTime");
return result.obj();
}
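For orientation, a rough sketch of the document this section emits (illustrative only; the section name and the exact encoding of the appended opTime are assumptions inferred from the appends above, not taken from this diff):

// Roughly:
//   "sharding" : {
//       "configsvrConnectionString" : "<config server connection string>",
//       "lastSeenConfigServerOpTime" : { ... }
//   }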
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
index 2fab9570572..41ad5b8ce7b 100644
--- a/src/mongo/s/server.cpp
+++ b/src/mongo/s/server.cpp
@@ -136,23 +136,23 @@ static void cleanupTask() {
Client::initThreadIfNotAlready();
Client& client = cc();
ServiceContext::UniqueOperationContext uniqueTxn;
- OperationContext* txn = client.getOperationContext();
- if (!txn) {
+ OperationContext* opCtx = client.getOperationContext();
+ if (!opCtx) {
uniqueTxn = client.makeOperationContext();
- txn = uniqueTxn.get();
+ opCtx = uniqueTxn.get();
}
if (serviceContext)
serviceContext->setKillAllOperations();
- if (auto cursorManager = Grid::get(txn)->getCursorManager()) {
+ if (auto cursorManager = Grid::get(opCtx)->getCursorManager()) {
cursorManager->shutdown();
}
- if (auto pool = Grid::get(txn)->getExecutorPool()) {
+ if (auto pool = Grid::get(opCtx)->getExecutorPool()) {
pool->shutdownAndJoin();
}
- if (auto catalog = Grid::get(txn)->catalogClient(txn)) {
- catalog->shutDown(txn);
+ if (auto catalog = Grid::get(opCtx)->catalogClient(opCtx)) {
+ catalog->shutDown(opCtx);
}
}
@@ -173,7 +173,7 @@ static BSONObj buildErrReply(const DBException& ex) {
using namespace mongo;
-static Status initializeSharding(OperationContext* txn) {
+static Status initializeSharding(OperationContext* opCtx) {
auto targeterFactory = stdx::make_unique<RemoteCommandTargeterFactoryImpl>();
auto targeterFactoryPtr = targeterFactory.get();
@@ -198,9 +198,9 @@ static Status initializeSharding(OperationContext* txn) {
stdx::make_unique<ShardFactory>(std::move(buildersMap), std::move(targeterFactory));
Status status = initializeGlobalShardingState(
- txn,
+ opCtx,
mongosGlobalParams.configdbs,
- generateDistLockProcessId(txn),
+ generateDistLockProcessId(opCtx),
std::move(shardFactory),
[]() {
auto hookList = stdx::make_unique<rpc::EgressMetadataHookList>();
@@ -216,7 +216,7 @@ static Status initializeSharding(OperationContext* txn) {
return status;
}
- status = reloadShardRegistryUntilSuccess(txn);
+ status = reloadShardRegistryUntilSuccess(opCtx);
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/s/service_entry_point_mongos.cpp b/src/mongo/s/service_entry_point_mongos.cpp
index 67ac08e9354..623af0e3602 100644
--- a/src/mongo/s/service_entry_point_mongos.cpp
+++ b/src/mongo/s/service_entry_point_mongos.cpp
@@ -100,7 +100,7 @@ void ServiceEntryPointMongos::_sessionLoop(const transport::SessionHandle& sessi
uassertStatusOK(status);
}
- auto txn = cc().makeOperationContext();
+ auto opCtx = cc().makeOperationContext();
const int32_t msgId = message.header().getId();
@@ -114,8 +114,8 @@ void ServiceEntryPointMongos::_sessionLoop(const transport::SessionHandle& sessi
// Start a new LastError session. Any exceptions thrown from here onwards will be returned
// to the caller (if the type of the message permits it).
- ClusterLastErrorInfo::get(txn->getClient()).newRequest();
- LastError::get(txn->getClient()).startRequest();
+ ClusterLastErrorInfo::get(opCtx->getClient()).newRequest();
+ LastError::get(opCtx->getClient()).startRequest();
DbMessage dbm(message);
@@ -135,7 +135,7 @@ void ServiceEntryPointMongos::_sessionLoop(const transport::SessionHandle& sessi
nss.db() != NamespaceString::kLocalDb);
}
- AuthorizationSession::get(txn->getClient())->startRequest(txn.get());
+ AuthorizationSession::get(opCtx->getClient())->startRequest(opCtx.get());
LOG(3) << "Request::process begin ns: " << nss << " msg id: " << msgId
<< " op: " << networkOpToString(op);
@@ -143,19 +143,19 @@ void ServiceEntryPointMongos::_sessionLoop(const transport::SessionHandle& sessi
switch (op) {
case dbQuery:
if (nss.isCommand() || nss.isSpecialCommand()) {
- Strategy::clientCommandOp(txn.get(), nss, &dbm);
+ Strategy::clientCommandOp(opCtx.get(), nss, &dbm);
} else {
- Strategy::queryOp(txn.get(), nss, &dbm);
+ Strategy::queryOp(opCtx.get(), nss, &dbm);
}
break;
case dbGetMore:
- Strategy::getMore(txn.get(), nss, &dbm);
+ Strategy::getMore(opCtx.get(), nss, &dbm);
break;
case dbKillCursors:
- Strategy::killCursors(txn.get(), &dbm);
+ Strategy::killCursors(opCtx.get(), &dbm);
break;
default:
- Strategy::writeOp(txn.get(), &dbm);
+ Strategy::writeOp(opCtx.get(), &dbm);
break;
}
@@ -172,7 +172,7 @@ void ServiceEntryPointMongos::_sessionLoop(const transport::SessionHandle& sessi
}
// We *always* populate the last error for now
- LastError::get(txn->getClient()).setLastError(ex.getCode(), ex.what());
+ LastError::get(opCtx->getClient()).setLastError(ex.getCode(), ex.what());
}
if ((counter++ & 0xf) == 0) {
diff --git a/src/mongo/s/shard_key_pattern.cpp b/src/mongo/s/shard_key_pattern.cpp
index 1a8e77658e1..ef0009bd8d6 100644
--- a/src/mongo/s/shard_key_pattern.cpp
+++ b/src/mongo/s/shard_key_pattern.cpp
@@ -265,7 +265,7 @@ static BSONElement findEqualityElement(const EqualityMatches& equalities, const
return extractKeyElementFromMatchable(matchable, suffixStr);
}
-StatusWith<BSONObj> ShardKeyPattern::extractShardKeyFromQuery(OperationContext* txn,
+StatusWith<BSONObj> ShardKeyPattern::extractShardKeyFromQuery(OperationContext* opCtx,
const BSONObj& basicQuery) const {
if (!isValid())
return StatusWith<BSONObj>(BSONObj());
@@ -273,7 +273,8 @@ StatusWith<BSONObj> ShardKeyPattern::extractShardKeyFromQuery(OperationContext*
auto qr = stdx::make_unique<QueryRequest>(NamespaceString(""));
qr->setFilter(basicQuery);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop());
+ auto statusWithCQ =
+ CanonicalQuery::canonicalize(opCtx, std::move(qr), ExtensionsCallbackNoop());
if (!statusWithCQ.isOK()) {
return StatusWith<BSONObj>(statusWithCQ.getStatus());
}
diff --git a/src/mongo/s/shard_key_pattern.h b/src/mongo/s/shard_key_pattern.h
index 67c8eeb4a2f..ca4e01a4a5a 100644
--- a/src/mongo/s/shard_key_pattern.h
+++ b/src/mongo/s/shard_key_pattern.h
@@ -164,7 +164,7 @@ public:
* { a : { b : { $eq : "hi" } } } --> returns {} because the query language treats this as
* a : { $eq : { b : ... } }
*/
- StatusWith<BSONObj> extractShardKeyFromQuery(OperationContext* txn,
+ StatusWith<BSONObj> extractShardKeyFromQuery(OperationContext* opCtx,
const BSONObj& basicQuery) const;
BSONObj extractShardKeyFromQuery(const CanonicalQuery& query) const;
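To make the equality-extraction rule above concrete, here is a hypothetical caller sketch (not part of this patch); it assumes a ShardKeyPattern built over {a: 1}, a usable OperationContext, and the usual fromjson() helper:

// Hypothetical illustration of the documented behaviour, assuming pattern {a: 1}.
void shardKeyExtractionExample(OperationContext* opCtx) {
    ShardKeyPattern pattern(BSON("a" << 1));

    // A top-level equality can be extracted as the shard key.
    auto simple = pattern.extractShardKeyFromQuery(opCtx, fromjson("{a: 'hi'}"));
    invariant(simple.isOK());  // expected: { a: "hi" }

    // A nested $eq under a sub-document is treated as a : {$eq : {b : ...}},
    // so per the comment above it yields {} rather than a usable shard key.
    auto nested = pattern.extractShardKeyFromQuery(opCtx, fromjson("{a: {b: {$eq: 'hi'}}}"));
    invariant(!nested.isOK() || nested.getValue().isEmpty());
}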
diff --git a/src/mongo/s/shard_key_pattern_test.cpp b/src/mongo/s/shard_key_pattern_test.cpp
index e3b31a7a5d2..2b4ddb22fae 100644
--- a/src/mongo/s/shard_key_pattern_test.cpp
+++ b/src/mongo/s/shard_key_pattern_test.cpp
@@ -261,9 +261,9 @@ TEST(ShardKeyPattern, ExtractDocShardKeyHashed) {
static BSONObj queryKey(const ShardKeyPattern& pattern, const BSONObj& query) {
QueryTestServiceContext serviceContext;
- auto txn = serviceContext.makeOperationContext();
+ auto opCtx = serviceContext.makeOperationContext();
- StatusWith<BSONObj> status = pattern.extractShardKeyFromQuery(txn.get(), query);
+ StatusWith<BSONObj> status = pattern.extractShardKeyFromQuery(opCtx.get(), query);
if (!status.isOK())
return BSONObj();
return status.getValue();
diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp
index b38fd9657e7..8adf605d4a3 100644
--- a/src/mongo/s/shard_util.cpp
+++ b/src/mongo/s/shard_util.cpp
@@ -53,14 +53,14 @@ const char kShouldMigrate[] = "shouldMigrate";
} // namespace
-StatusWith<long long> retrieveTotalShardSize(OperationContext* txn, const ShardId& shardId) {
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+StatusWith<long long> retrieveTotalShardSize(OperationContext* opCtx, const ShardId& shardId) {
+ auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
auto listDatabasesStatus = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
"admin",
BSON("listDatabases" << 1),
@@ -80,7 +80,7 @@ StatusWith<long long> retrieveTotalShardSize(OperationContext* txn, const ShardI
return totalSizeElem.numberLong();
}
-StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* txn,
+StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* opCtx,
const ShardId& shardId,
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
@@ -96,13 +96,13 @@ StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* txn,
cmd.append("maxChunkObjects", *maxObjs);
}
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
return shardStatus.getStatus();
}
auto cmdStatus = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryPreferred},
"admin",
cmd.obj(),
@@ -127,7 +127,7 @@ StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* txn,
}
StatusWith<boost::optional<ChunkRange>> splitChunkAtMultiplePoints(
- OperationContext* txn,
+ OperationContext* opCtx,
const ShardId& shardId,
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
@@ -176,12 +176,12 @@ StatusWith<boost::optional<ChunkRange>> splitChunkAtMultiplePoints(
Status status{ErrorCodes::InternalError, "Uninitialized value"};
BSONObj cmdResponse;
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(txn, shardId);
+ auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId);
if (!shardStatus.isOK()) {
status = shardStatus.getStatus();
} else {
auto cmdStatus = shardStatus.getValue()->runCommandWithFixedRetryAttempts(
- txn,
+ opCtx,
ReadPreferenceSetting{ReadPreference::PrimaryOnly},
"admin",
cmdObj,
diff --git a/src/mongo/s/shard_util.h b/src/mongo/s/shard_util.h
index 79fec95e897..4c3eaabac0a 100644
--- a/src/mongo/s/shard_util.h
+++ b/src/mongo/s/shard_util.h
@@ -58,7 +58,7 @@ namespace shardutil {
* ShardNotFound if shard by that id is not available on the registry
* NoSuchKey if the total shard size could not be retrieved
*/
-StatusWith<long long> retrieveTotalShardSize(OperationContext* txn, const ShardId& shardId);
+StatusWith<long long> retrieveTotalShardSize(OperationContext* opCtx, const ShardId& shardId);
/**
* Ask the specified shard to figure out the split points for a given chunk.
@@ -71,7 +71,7 @@ StatusWith<long long> retrieveTotalShardSize(OperationContext* txn, const ShardI
* maxObjs Limits the number of objects in each chunk. Zero means max, unspecified means use the
* server default.
*/
-StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* txn,
+StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* opCtx,
const ShardId& shardId,
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
@@ -92,7 +92,7 @@ StatusWith<std::vector<BSONObj>> selectChunkSplitPoints(OperationContext* txn,
* splitPoints The set of points at which the chunk should be split.
*/
StatusWith<boost::optional<ChunkRange>> splitChunkAtMultiplePoints(
- OperationContext* txn,
+ OperationContext* opCtx,
const ShardId& shardId,
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
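A brief, hypothetical caller sketch for the first helper above (checkShardHasSpace and maxSizeBytes are illustrative names, not part of this change):

// Hypothetical: decide whether a shard is below a size threshold before using it
// as a migration target, surfacing ShardNotFound/NoSuchKey from the helper as-is.
StatusWith<bool> checkShardHasSpace(OperationContext* opCtx,
                                    const ShardId& shardId,
                                    long long maxSizeBytes) {
    auto sizeStatus = shardutil::retrieveTotalShardSize(opCtx, shardId);
    if (!sizeStatus.isOK()) {
        return sizeStatus.getStatus();
    }
    return sizeStatus.getValue() < maxSizeBytes;
}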
diff --git a/src/mongo/s/sharding_egress_metadata_hook.cpp b/src/mongo/s/sharding_egress_metadata_hook.cpp
index f40f80f2e50..af263360a0d 100644
--- a/src/mongo/s/sharding_egress_metadata_hook.cpp
+++ b/src/mongo/s/sharding_egress_metadata_hook.cpp
@@ -51,13 +51,13 @@ namespace rpc {
using std::shared_ptr;
Status ShardingEgressMetadataHook::writeRequestMetadata(bool shardedConnection,
- OperationContext* txn,
+ OperationContext* opCtx,
const StringData target,
BSONObjBuilder* metadataBob) {
try {
- audit::writeImpersonatedUsersToMetadata(txn, metadataBob);
+ audit::writeImpersonatedUsersToMetadata(opCtx, metadataBob);
- ClientMetadataIsMasterState::writeToMetadata(txn, metadataBob);
+ ClientMetadataIsMasterState::writeToMetadata(opCtx, metadataBob);
if (!shardedConnection) {
return Status::OK();
}
@@ -68,10 +68,10 @@ Status ShardingEgressMetadataHook::writeRequestMetadata(bool shardedConnection,
}
}
-Status ShardingEgressMetadataHook::writeRequestMetadata(OperationContext* txn,
+Status ShardingEgressMetadataHook::writeRequestMetadata(OperationContext* opCtx,
const HostAndPort& target,
BSONObjBuilder* metadataBob) {
- return writeRequestMetadata(true, txn, target.toString(), metadataBob);
+ return writeRequestMetadata(true, opCtx, target.toString(), metadataBob);
}
Status ShardingEgressMetadataHook::readReplyMetadata(const StringData replySource,
diff --git a/src/mongo/s/sharding_egress_metadata_hook.h b/src/mongo/s/sharding_egress_metadata_hook.h
index df105c813bf..1c8849dcea3 100644
--- a/src/mongo/s/sharding_egress_metadata_hook.h
+++ b/src/mongo/s/sharding_egress_metadata_hook.h
@@ -46,7 +46,7 @@ public:
virtual ~ShardingEgressMetadataHook() = default;
Status readReplyMetadata(const HostAndPort& replySource, const BSONObj& metadataObj) override;
- Status writeRequestMetadata(OperationContext* txn,
+ Status writeRequestMetadata(OperationContext* opCtx,
const HostAndPort& target,
BSONObjBuilder* metadataBob) override;
@@ -57,7 +57,7 @@ public:
// contact.
Status readReplyMetadata(const StringData replySource, const BSONObj& metadataObj);
Status writeRequestMetadata(bool shardedConnection,
- OperationContext* txn,
+ OperationContext* opCtx,
const StringData target,
BSONObjBuilder* metadataBob);
diff --git a/src/mongo/s/sharding_initialization.cpp b/src/mongo/s/sharding_initialization.cpp
index 1ffcbda99dd..db41e736fc6 100644
--- a/src/mongo/s/sharding_initialization.cpp
+++ b/src/mongo/s/sharding_initialization.cpp
@@ -145,17 +145,17 @@ std::unique_ptr<TaskExecutorPool> makeTaskExecutorPool(
const StringData kDistLockProcessIdForConfigServer("ConfigServer");
-std::string generateDistLockProcessId(OperationContext* txn) {
+std::string generateDistLockProcessId(OperationContext* opCtx) {
std::unique_ptr<SecureRandom> rng(SecureRandom::create());
return str::stream()
<< HostAndPort(getHostName(), serverGlobalParams.port).toString() << ':'
<< durationCount<Seconds>(
- txn->getServiceContext()->getPreciseClockSource()->now().toDurationSinceEpoch())
+ opCtx->getServiceContext()->getPreciseClockSource()->now().toDurationSinceEpoch())
<< ':' << rng->nextInt64();
}
-Status initializeGlobalShardingState(OperationContext* txn,
+Status initializeGlobalShardingState(OperationContext* opCtx,
const ConnectionString& configCS,
StringData distLockProcessId,
std::unique_ptr<ShardFactory> shardFactory,
@@ -189,7 +189,7 @@ Status initializeGlobalShardingState(OperationContext* txn,
auto shardRegistry(stdx::make_unique<ShardRegistry>(std::move(shardFactory), configCS));
auto catalogClient =
- makeCatalogClient(txn->getServiceContext(), shardRegistry.get(), distLockProcessId);
+ makeCatalogClient(opCtx->getServiceContext(), shardRegistry.get(), distLockProcessId);
auto rawCatalogClient = catalogClient.get();
@@ -227,20 +227,20 @@ Status initializeGlobalShardingState(OperationContext* txn,
return Status::OK();
}
-Status reloadShardRegistryUntilSuccess(OperationContext* txn) {
+Status reloadShardRegistryUntilSuccess(OperationContext* opCtx) {
if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
return Status::OK();
}
while (!globalInShutdownDeprecated()) {
- auto stopStatus = txn->checkForInterruptNoAssert();
+ auto stopStatus = opCtx->checkForInterruptNoAssert();
if (!stopStatus.isOK()) {
return stopStatus;
}
try {
- uassertStatusOK(ClusterIdentityLoader::get(txn)->loadClusterId(
- txn, repl::ReadConcernLevel::kMajorityReadConcern));
+ uassertStatusOK(ClusterIdentityLoader::get(opCtx)->loadClusterId(
+ opCtx, repl::ReadConcernLevel::kMajorityReadConcern));
if (grid.shardRegistry()->isUp()) {
return Status::OK();
}
diff --git a/src/mongo/s/sharding_initialization.h b/src/mongo/s/sharding_initialization.h
index 33e3046074c..f2e3d12db8b 100644
--- a/src/mongo/s/sharding_initialization.h
+++ b/src/mongo/s/sharding_initialization.h
@@ -63,13 +63,13 @@ extern const StringData kDistLockProcessIdForConfigServer;
/**
* Generates a uniform string to be used as a process id for the distributed lock manager.
*/
-std::string generateDistLockProcessId(OperationContext* txn);
+std::string generateDistLockProcessId(OperationContext* opCtx);
/**
* Takes in the connection string for reaching the config servers and initializes the global
* ShardingCatalogClient, ShardingCatalogManager, ShardRegistry, and Grid objects.
*/
-Status initializeGlobalShardingState(OperationContext* txn,
+Status initializeGlobalShardingState(OperationContext* opCtx,
const ConnectionString& configCS,
StringData distLockProcessId,
std::unique_ptr<ShardFactory> shardFactory,
@@ -80,6 +80,6 @@ Status initializeGlobalShardingState(OperationContext* txn,
* Tries to contact the config server and reload the shard registry and the cluster ID until it
* succeeds or is interrupted.
*/
-Status reloadShardRegistryUntilSuccess(OperationContext* txn);
+Status reloadShardRegistryUntilSuccess(OperationContext* opCtx);
} // namespace mongo
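As a reading aid, the process id assembled by generateDistLockProcessId above has a fixed shape; the concrete values below are invented for illustration only:

// "<hostname>:<port>:<seconds since epoch>:<random int64>"
// e.g. something like "mongos1:27017:1487791254:4239871520394857321"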
diff --git a/src/mongo/s/sharding_raii.cpp b/src/mongo/s/sharding_raii.cpp
index ea50d5ce128..b90f975ed35 100644
--- a/src/mongo/s/sharding_raii.cpp
+++ b/src/mongo/s/sharding_raii.cpp
@@ -47,9 +47,9 @@ ScopedShardDatabase::ScopedShardDatabase(std::shared_ptr<DBConfig> db) : _db(db)
ScopedShardDatabase::~ScopedShardDatabase() = default;
-StatusWith<ScopedShardDatabase> ScopedShardDatabase::getExisting(OperationContext* txn,
+StatusWith<ScopedShardDatabase> ScopedShardDatabase::getExisting(OperationContext* opCtx,
StringData dbName) {
- auto dbStatus = Grid::get(txn)->catalogCache()->getDatabase(txn, dbName.toString());
+ auto dbStatus = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName.toString());
if (!dbStatus.isOK()) {
return {dbStatus.getStatus().code(),
str::stream() << "Database " << dbName << " was not found due to "
@@ -59,18 +59,18 @@ StatusWith<ScopedShardDatabase> ScopedShardDatabase::getExisting(OperationContex
return {ScopedShardDatabase(std::move(dbStatus.getValue()))};
}
-StatusWith<ScopedShardDatabase> ScopedShardDatabase::getOrCreate(OperationContext* txn,
+StatusWith<ScopedShardDatabase> ScopedShardDatabase::getOrCreate(OperationContext* opCtx,
StringData dbName) {
- auto dbStatus = getExisting(txn, dbName);
+ auto dbStatus = getExisting(opCtx, dbName);
if (dbStatus.isOK()) {
return dbStatus;
}
if (dbStatus == ErrorCodes::NamespaceNotFound) {
auto statusCreateDb =
- Grid::get(txn)->catalogClient(txn)->createDatabase(txn, dbName.toString());
+ Grid::get(opCtx)->catalogClient(opCtx)->createDatabase(opCtx, dbName.toString());
if (statusCreateDb.isOK() || statusCreateDb == ErrorCodes::NamespaceExists) {
- return getExisting(txn, dbName);
+ return getExisting(opCtx, dbName);
}
return statusCreateDb;
@@ -87,22 +87,22 @@ ScopedChunkManager::ScopedChunkManager(ScopedShardDatabase db, std::shared_ptr<S
ScopedChunkManager::~ScopedChunkManager() = default;
-StatusWith<ScopedChunkManager> ScopedChunkManager::get(OperationContext* txn,
+StatusWith<ScopedChunkManager> ScopedChunkManager::get(OperationContext* opCtx,
const NamespaceString& nss) {
- auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, nss.db());
+ auto scopedDbStatus = ScopedShardDatabase::getExisting(opCtx, nss.db());
if (!scopedDbStatus.isOK()) {
return scopedDbStatus.getStatus();
}
auto scopedDb = std::move(scopedDbStatus.getValue());
- auto cm = scopedDb.db()->getChunkManagerIfExists(txn, nss.ns());
+ auto cm = scopedDb.db()->getChunkManagerIfExists(opCtx, nss.ns());
if (cm) {
return {ScopedChunkManager(std::move(scopedDb), std::move(cm))};
}
auto shardStatus =
- Grid::get(txn)->shardRegistry()->getShard(txn, scopedDb.db()->getPrimaryId());
+ Grid::get(opCtx)->shardRegistry()->getShard(opCtx, scopedDb.db()->getPrimaryId());
if (!shardStatus.isOK()) {
return {ErrorCodes::fromInt(40371),
str::stream() << "The primary shard for collection " << nss.ns()
@@ -113,19 +113,19 @@ StatusWith<ScopedChunkManager> ScopedChunkManager::get(OperationContext* txn,
return {ScopedChunkManager(std::move(scopedDb), std::move(shardStatus.getValue()))};
}
-StatusWith<ScopedChunkManager> ScopedChunkManager::getOrCreate(OperationContext* txn,
+StatusWith<ScopedChunkManager> ScopedChunkManager::getOrCreate(OperationContext* opCtx,
const NamespaceString& nss) {
- auto scopedDbStatus = ScopedShardDatabase::getOrCreate(txn, nss.db());
+ auto scopedDbStatus = ScopedShardDatabase::getOrCreate(opCtx, nss.db());
if (!scopedDbStatus.isOK()) {
return scopedDbStatus.getStatus();
}
- return ScopedChunkManager::get(txn, nss);
+ return ScopedChunkManager::get(opCtx, nss);
}
-StatusWith<ScopedChunkManager> ScopedChunkManager::refreshAndGet(OperationContext* txn,
+StatusWith<ScopedChunkManager> ScopedChunkManager::refreshAndGet(OperationContext* opCtx,
const NamespaceString& nss) {
- auto scopedDbStatus = ScopedShardDatabase::getExisting(txn, nss.db());
+ auto scopedDbStatus = ScopedShardDatabase::getExisting(opCtx, nss.db());
if (!scopedDbStatus.isOK()) {
return scopedDbStatus.getStatus();
}
@@ -134,7 +134,7 @@ StatusWith<ScopedChunkManager> ScopedChunkManager::refreshAndGet(OperationContex
try {
std::shared_ptr<ChunkManager> cm =
- scopedDb.db()->getChunkManager(txn, nss.ns(), true, false);
+ scopedDb.db()->getChunkManager(opCtx, nss.ns(), true, false);
if (!cm) {
return {ErrorCodes::NamespaceNotSharded,
diff --git a/src/mongo/s/sharding_raii.h b/src/mongo/s/sharding_raii.h
index 92d5858f36b..0c54f281985 100644
--- a/src/mongo/s/sharding_raii.h
+++ b/src/mongo/s/sharding_raii.h
@@ -48,14 +48,14 @@ public:
* Otherwise, either returns NamespaceNotFound if the database does not exist, or any other
* error code indicating why the database could not be loaded.
*/
- static StatusWith<ScopedShardDatabase> getExisting(OperationContext* txn, StringData dbName);
+ static StatusWith<ScopedShardDatabase> getExisting(OperationContext* opCtx, StringData dbName);
/**
* If the specified database exists already, loads it in the cache (if not already there) and
     * returns it. Otherwise, if it does not exist, this call will implicitly create it as
* non-sharded.
*/
- static StatusWith<ScopedShardDatabase> getOrCreate(OperationContext* txn, StringData dbName);
+ static StatusWith<ScopedShardDatabase> getOrCreate(OperationContext* opCtx, StringData dbName);
/**
* Returns the underlying database cache entry.
@@ -94,13 +94,13 @@ public:
* Returns NamespaceNotFound if the database does not exist, or any other error indicating
* problem communicating with the config server.
*/
- static StatusWith<ScopedChunkManager> get(OperationContext* txn, const NamespaceString& nss);
+ static StatusWith<ScopedChunkManager> get(OperationContext* opCtx, const NamespaceString& nss);
/**
* If the database holding the specified namespace does not exist, creates it and then behaves
* like the 'get' method above.
*/
- static StatusWith<ScopedChunkManager> getOrCreate(OperationContext* txn,
+ static StatusWith<ScopedChunkManager> getOrCreate(OperationContext* opCtx,
const NamespaceString& nss);
/**
@@ -110,7 +110,7 @@ public:
* metadata and if so incorporates those. Otherwise, if it does not exist or any other error
* occurs, passes that error back.
*/
- static StatusWith<ScopedChunkManager> refreshAndGet(OperationContext* txn,
+ static StatusWith<ScopedChunkManager> refreshAndGet(OperationContext* opCtx,
const NamespaceString& nss);
/**
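A hypothetical caller sketch for the RAII helpers above (touchRoutingInfo is an illustrative name; the error codes follow the comments in this header):

// Hypothetical: force a routing refresh for a namespace and report failure upward.
Status touchRoutingInfo(OperationContext* opCtx, const NamespaceString& nss) {
    auto scopedCMStatus = ScopedChunkManager::refreshAndGet(opCtx, nss);
    if (!scopedCMStatus.isOK()) {
        // e.g. NamespaceNotFound or NamespaceNotSharded, as described above.
        return scopedCMStatus.getStatus();
    }
    // On success the returned ScopedChunkManager keeps the refreshed routing
    // information alive for the caller's scope; its accessors are elided here.
    return Status::OK();
}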
diff --git a/src/mongo/s/sharding_uptime_reporter.cpp b/src/mongo/s/sharding_uptime_reporter.cpp
index d2d170df176..8d507dbfce6 100644
--- a/src/mongo/s/sharding_uptime_reporter.cpp
+++ b/src/mongo/s/sharding_uptime_reporter.cpp
@@ -57,7 +57,9 @@ std::string constructInstanceIdString() {
* Reports the uptime status of the current instance to the config.pings collection. This method
* is best-effort and never throws.
*/
-void reportStatus(OperationContext* txn, const std::string& instanceId, const Timer& upTimeTimer) {
+void reportStatus(OperationContext* opCtx,
+ const std::string& instanceId,
+ const Timer& upTimeTimer) {
MongosType mType;
mType.setName(instanceId);
mType.setPing(jsTime());
@@ -67,8 +69,8 @@ void reportStatus(OperationContext* txn, const std::string& instanceId, const Ti
mType.setMongoVersion(VersionInfoInterface::instance().version().toString());
try {
- Grid::get(txn)->catalogClient(txn)->updateConfigDocument(
- txn,
+ Grid::get(opCtx)->catalogClient(opCtx)->updateConfigDocument(
+ opCtx,
MongosType::ConfigNS,
BSON(MongosType::name(instanceId)),
BSON("$set" << mType.toBSON()),
@@ -99,11 +101,12 @@ void ShardingUptimeReporter::startPeriodicThread() {
while (!globalInShutdownDeprecated()) {
{
- auto txn = cc().makeOperationContext();
- reportStatus(txn.get(), instanceId, upTimeTimer);
+ auto opCtx = cc().makeOperationContext();
+ reportStatus(opCtx.get(), instanceId, upTimeTimer);
- auto status =
- Grid::get(txn.get())->getBalancerConfiguration()->refreshAndCheck(txn.get());
+ auto status = Grid::get(opCtx.get())
+ ->getBalancerConfiguration()
+ ->refreshAndCheck(opCtx.get());
if (!status.isOK()) {
warning() << "failed to refresh mongos settings" << causedBy(status);
}
diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp
index 6a6395eea13..57f84edf800 100644
--- a/src/mongo/s/write_ops/batch_write_exec.cpp
+++ b/src/mongo/s/write_ops/batch_write_exec.cpp
@@ -84,7 +84,7 @@ static void noteStaleResponses(const vector<ShardError*>& staleErrors, NSTargete
// This only applies when no writes are occurring and metadata is not changing on reload
static const int kMaxRoundsWithoutProgress(5);
-void BatchWriteExec::executeBatch(OperationContext* txn,
+void BatchWriteExec::executeBatch(OperationContext* opCtx,
const BatchedCommandRequest& clientRequest,
BatchedCommandResponse* clientResponse,
BatchWriteExecStats* stats) {
@@ -132,7 +132,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
// record target errors definitively.
bool recordTargetErrors = refreshedTargeter;
Status targetStatus =
- batchOp.targetBatch(txn, *_targeter, recordTargetErrors, &childBatches);
+ batchOp.targetBatch(opCtx, *_targeter, recordTargetErrors, &childBatches);
if (!targetStatus.isOK()) {
// Don't do anything until a targeter refresh
_targeter->noteCouldNotTarget();
@@ -171,8 +171,8 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
// Figure out what host we need to dispatch our targeted batch
const ReadPreferenceSetting readPref(ReadPreference::PrimaryOnly, TagSet());
- auto shardStatus = Grid::get(txn)->shardRegistry()->getShard(
- txn, nextBatch->getEndpoint().shardName);
+ auto shardStatus = Grid::get(opCtx)->shardRegistry()->getShard(
+ opCtx, nextBatch->getEndpoint().shardName);
bool resolvedHost = false;
ConnectionString shardHost;
@@ -327,7 +327,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn,
//
bool targeterChanged = false;
- Status refreshStatus = _targeter->refreshIfNeeded(txn, &targeterChanged);
+ Status refreshStatus = _targeter->refreshIfNeeded(opCtx, &targeterChanged);
if (!refreshStatus.isOK()) {
// It's okay if we can't refresh, we'll just record errors for the ops if
diff --git a/src/mongo/s/write_ops/batch_write_exec.h b/src/mongo/s/write_ops/batch_write_exec.h
index 739e16a046d..b430e3c5baf 100644
--- a/src/mongo/s/write_ops/batch_write_exec.h
+++ b/src/mongo/s/write_ops/batch_write_exec.h
@@ -72,7 +72,7 @@ public:
*
* This function does not throw, any errors are reported via the clientResponse.
*/
- void executeBatch(OperationContext* txn,
+ void executeBatch(OperationContext* opCtx,
const BatchedCommandRequest& clientRequest,
BatchedCommandResponse* clientResponse,
BatchWriteExecStats* stats);
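A minimal, hypothetical caller sketch for the renamed executeBatch entry point (runBatch is an illustrative name; constructing the BatchWriteExec and its targeter is out of scope here):

// Hypothetical: run one batched write; errors surface via the response, never as exceptions.
void runBatch(OperationContext* opCtx,
              BatchWriteExec* exec,
              const BatchedCommandRequest& request) {
    BatchedCommandResponse response;
    BatchWriteExecStats stats;
    exec->executeBatch(opCtx, request, &response, &stats);
    if (!response.getOk()) {
        warning() << "batch write failed: " << response.toBSON();
    }
}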
diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp
index 4f7a4efe52c..6f0aad917b4 100644
--- a/src/mongo/s/write_ops/batch_write_op.cpp
+++ b/src/mongo/s/write_ops/batch_write_op.cpp
@@ -233,7 +233,7 @@ static void cancelBatches(const WriteErrorDetail& why,
batchMap->clear();
}
-Status BatchWriteOp::targetBatch(OperationContext* txn,
+Status BatchWriteOp::targetBatch(OperationContext* opCtx,
const NSTargeter& targeter,
bool recordTargetErrors,
vector<TargetedWriteBatch*>* targetedBatches) {
@@ -293,7 +293,7 @@ Status BatchWriteOp::targetBatch(OperationContext* txn,
OwnedPointerVector<TargetedWrite> writesOwned;
vector<TargetedWrite*>& writes = writesOwned.mutableVector();
- Status targetStatus = writeOp.targetWrites(txn, targeter, &writes);
+ Status targetStatus = writeOp.targetWrites(opCtx, targeter, &writes);
if (!targetStatus.isOK()) {
WriteErrorDetail targetError;
diff --git a/src/mongo/s/write_ops/batch_write_op.h b/src/mongo/s/write_ops/batch_write_op.h
index 030767851c3..455b84e1cd3 100644
--- a/src/mongo/s/write_ops/batch_write_op.h
+++ b/src/mongo/s/write_ops/batch_write_op.h
@@ -106,7 +106,7 @@ public:
*
* Returned TargetedWriteBatches are owned by the caller.
*/
- Status targetBatch(OperationContext* txn,
+ Status targetBatch(OperationContext* opCtx,
const NSTargeter& targeter,
bool recordTargetErrors,
std::vector<TargetedWriteBatch*>* targetedBatches);
diff --git a/src/mongo/s/write_ops/batch_write_op_test.cpp b/src/mongo/s/write_ops/batch_write_op_test.cpp
index ea50d84a996..e5bc9b5a5cd 100644
--- a/src/mongo/s/write_ops/batch_write_op_test.cpp
+++ b/src/mongo/s/write_ops/batch_write_op_test.cpp
@@ -135,7 +135,7 @@ TEST(WriteOpTests, SingleOp) {
// Single-op targeting test
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -152,7 +152,7 @@ TEST(WriteOpTests, SingleOp) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -175,7 +175,7 @@ TEST(WriteOpTests, SingleError) {
// Single-op error test
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -192,7 +192,7 @@ TEST(WriteOpTests, SingleError) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -221,7 +221,7 @@ TEST(WriteOpTests, SingleTargetError) {
// Single-op targeting error test
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -238,14 +238,14 @@ TEST(WriteOpTests, SingleTargetError) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(!status.isOK());
ASSERT(!batchOp.isFinished());
ASSERT_EQUALS(targeted.size(), 0u);
// Record targeting failures
- status = batchOp.targetBatch(&txn, targeter, true, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, true, &targeted);
ASSERT(status.isOK());
ASSERT(batchOp.isFinished());
@@ -264,7 +264,7 @@ TEST(WriteOpTests, SingleWriteConcernErrorOrdered) {
// write concern error if one occurs
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -281,7 +281,7 @@ TEST(WriteOpTests, SingleWriteConcernErrorOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -314,7 +314,7 @@ TEST(WriteOpTests, SingleStaleError) {
// We should retry the same batch until we're not stale
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -329,7 +329,7 @@ TEST(WriteOpTests, SingleStaleError) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
BatchedCommandResponse response;
buildResponse(0, &response);
@@ -340,14 +340,14 @@ TEST(WriteOpTests, SingleStaleError) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
// Respond again with a stale response
batchOp.noteBatchResponse(*targeted.front(), response, NULL);
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
buildResponse(1, &response);
@@ -381,7 +381,7 @@ TEST(WriteOpTests, MultiOpSameShardOrdered) {
// Multi-op targeting test (ordered)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -400,7 +400,7 @@ TEST(WriteOpTests, MultiOpSameShardOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -425,7 +425,7 @@ TEST(WriteOpTests, MultiOpSameShardUnordered) {
// Multi-op targeting test (unordered)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -444,7 +444,7 @@ TEST(WriteOpTests, MultiOpSameShardUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -470,7 +470,7 @@ TEST(WriteOpTests, MultiOpTwoShardsOrdered) {
// There should be two sets of single batches (one to each shard, one-by-one)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -490,7 +490,7 @@ TEST(WriteOpTests, MultiOpTwoShardsOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -506,7 +506,7 @@ TEST(WriteOpTests, MultiOpTwoShardsOrdered) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
ASSERT_EQUALS(targeted.size(), 1u);
@@ -529,7 +529,7 @@ TEST(WriteOpTests, MultiOpTwoShardsUnordered) {
// There should be one set of two batches (one to each shard)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -549,7 +549,7 @@ TEST(WriteOpTests, MultiOpTwoShardsUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -581,7 +581,7 @@ TEST(WriteOpTests, MultiOpTwoShardsEachOrdered) {
// There should be two sets of two batches to each shard (two for each delete op)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -603,7 +603,7 @@ TEST(WriteOpTests, MultiOpTwoShardsEachOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -624,7 +624,7 @@ TEST(WriteOpTests, MultiOpTwoShardsEachOrdered) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
ASSERT_EQUALS(targeted.size(), 2u);
@@ -652,7 +652,7 @@ TEST(WriteOpTests, MultiOpTwoShardsEachUnordered) {
// There should be one set of two batches to each shard (containing writes for both ops)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -674,7 +674,7 @@ TEST(WriteOpTests, MultiOpTwoShardsEachUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -708,7 +708,7 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsOrdered) {
// last ops should be batched together
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -736,7 +736,7 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -753,7 +753,7 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsOrdered) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -774,7 +774,7 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsOrdered) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -792,7 +792,7 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsOrdered) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -820,7 +820,7 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsUnordered) {
// Should batch all the ops together into two batches of four ops for each shard
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -848,7 +848,7 @@ TEST(WriteOpTests, MultiOpOneOrTwoShardsUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -882,7 +882,7 @@ TEST(WriteOpTests, MultiOpSingleShardErrorUnordered) {
// There should be one set of two batches to each shard and an error reported
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -901,7 +901,7 @@ TEST(WriteOpTests, MultiOpSingleShardErrorUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -946,7 +946,7 @@ TEST(WriteOpTests, MultiOpTwoShardErrorsUnordered) {
    // There should be one set of two batches to each shard and two errors reported
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -965,7 +965,7 @@ TEST(WriteOpTests, MultiOpTwoShardErrorsUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -1013,7 +1013,7 @@ TEST(WriteOpTests, MultiOpPartialSingleShardErrorUnordered) {
// There should be one set of two batches to each shard and an error reported
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1034,7 +1034,7 @@ TEST(WriteOpTests, MultiOpPartialSingleShardErrorUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -1080,7 +1080,7 @@ TEST(WriteOpTests, MultiOpPartialSingleShardErrorOrdered) {
// op should not get run
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1101,7 +1101,7 @@ TEST(WriteOpTests, MultiOpPartialSingleShardErrorOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT(!batchOp.isFinished());
@@ -1151,7 +1151,7 @@ TEST(WriteOpTests, MultiOpErrorAndWriteConcernErrorUnordered) {
// Don't suppress the error if ordered : false
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -1169,7 +1169,7 @@ TEST(WriteOpTests, MultiOpErrorAndWriteConcernErrorUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
BatchedCommandResponse response;
buildResponse(1, &response);
@@ -1195,7 +1195,7 @@ TEST(WriteOpTests, SingleOpErrorAndWriteConcernErrorOrdered) {
// Suppress the write concern error if ordered and we also have an error
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1214,7 +1214,7 @@ TEST(WriteOpTests, SingleOpErrorAndWriteConcernErrorOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
BatchedCommandResponse response;
buildResponse(1, &response);
@@ -1246,7 +1246,7 @@ TEST(WriteOpTests, MultiOpFailedTargetOrdered) {
// Targeting failure on second op in batch op (ordered)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -1265,14 +1265,14 @@ TEST(WriteOpTests, MultiOpFailedTargetOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
// First targeting round fails since we may be stale
ASSERT(!status.isOK());
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, true, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, true, &targeted);
// Second targeting round is ok, but should stop at first write
ASSERT(status.isOK());
@@ -1288,7 +1288,7 @@ TEST(WriteOpTests, MultiOpFailedTargetOrdered) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, true, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, true, &targeted);
// Second targeting round results in an error which finishes the batch
ASSERT(status.isOK());
@@ -1309,7 +1309,7 @@ TEST(WriteOpTests, MultiOpFailedTargetUnordered) {
// Targeting failure on second op in batch op (unordered)
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -1329,14 +1329,14 @@ TEST(WriteOpTests, MultiOpFailedTargetUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
// First targeting round fails since we may be stale
ASSERT(!status.isOK());
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, true, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, true, &targeted);
// Second targeting round is ok, and should record an error
ASSERT(status.isOK());
@@ -1366,7 +1366,7 @@ TEST(WriteOpTests, MultiOpFailedBatchOrdered) {
// Expect this gets translated down into write errors for first affected write
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1384,7 +1384,7 @@ TEST(WriteOpTests, MultiOpFailedBatchOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
BatchedCommandResponse response;
buildResponse(1, &response);
@@ -1394,7 +1394,7 @@ TEST(WriteOpTests, MultiOpFailedBatchOrdered) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, true, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, true, &targeted);
buildErrResponse(ErrorCodes::UnknownError, "mock error", &response);
@@ -1419,7 +1419,7 @@ TEST(WriteOpTests, MultiOpFailedBatchUnordered) {
// Expect this gets translated down into write errors for all affected writes
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1438,7 +1438,7 @@ TEST(WriteOpTests, MultiOpFailedBatchUnordered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
BatchedCommandResponse response;
buildResponse(1, &response);
@@ -1472,7 +1472,7 @@ TEST(WriteOpTests, MultiOpAbortOrdered) {
// Expect this gets translated down into write error for first affected write
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1490,7 +1490,7 @@ TEST(WriteOpTests, MultiOpAbortOrdered) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
BatchedCommandResponse response;
buildResponse(1, &response);
@@ -1522,7 +1522,7 @@ TEST(WriteOpTests, MultiOpAbortUnordered) {
// Expect this gets translated down into write errors for all affected writes
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1563,7 +1563,7 @@ TEST(WriteOpTests, MultiOpTwoWCErrors) {
// error
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED());
ShardEndpoint endpointB(ShardId("shardB"), ChunkVersion::IGNORED());
@@ -1581,7 +1581,7 @@ TEST(WriteOpTests, MultiOpTwoWCErrors) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
BatchedCommandResponse response;
buildResponse(1, &response);
@@ -1592,7 +1592,7 @@ TEST(WriteOpTests, MultiOpTwoWCErrors) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, true, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, true, &targeted);
// Second shard write's write concern fails.
batchOp.noteBatchResponse(*targeted.front(), response, NULL);
@@ -1615,7 +1615,7 @@ TEST(WriteOpLimitTests, OneBigDoc) {
// Big single operation test - should go through
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -1634,7 +1634,7 @@ TEST(WriteOpLimitTests, OneBigDoc) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(targeted.size(), 1u);
@@ -1650,7 +1650,7 @@ TEST(WriteOpLimitTests, OneBigOneSmall) {
// Big doc with smaller additional doc - should go through as two batches
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -1671,7 +1671,7 @@ TEST(WriteOpLimitTests, OneBigOneSmall) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(targeted.size(), 1u);
ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
@@ -1683,7 +1683,7 @@ TEST(WriteOpLimitTests, OneBigOneSmall) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(targeted.size(), 1u);
ASSERT_EQUALS(targeted.front()->getWrites().size(), 1u);
@@ -1697,7 +1697,7 @@ TEST(WriteOpLimitTests, TooManyOps) {
// Batch of 1002 documents
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -1716,7 +1716,7 @@ TEST(WriteOpLimitTests, TooManyOps) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(targeted.size(), 1u);
ASSERT_EQUALS(targeted.front()->getWrites().size(), 1000u);
@@ -1728,7 +1728,7 @@ TEST(WriteOpLimitTests, TooManyOps) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(targeted.size(), 1u);
ASSERT_EQUALS(targeted.front()->getWrites().size(), 2u);
@@ -1743,7 +1743,7 @@ TEST(WriteOpLimitTests, UpdateOverheadIncluded) {
// calculation
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
MockNSTargeter targeter;
@@ -1780,7 +1780,7 @@ TEST(WriteOpLimitTests, UpdateOverheadIncluded) {
OwnedPointerVector<TargetedWriteBatch> targetedOwned;
vector<TargetedWriteBatch*>& targeted = targetedOwned.mutableVector();
- Status status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ Status status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(targeted.size(), 1u);
ASSERT_LESS_THAN(targeted.front()->getWrites().size(), 1000u);
@@ -1796,7 +1796,7 @@ TEST(WriteOpLimitTests, UpdateOverheadIncluded) {
ASSERT(!batchOp.isFinished());
targetedOwned.clear();
- status = batchOp.targetBatch(&txn, targeter, false, &targeted);
+ status = batchOp.targetBatch(&opCtx, targeter, false, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(targeted.size(), 1u);
ASSERT_LESS_THAN(targeted.front()->getWrites().size(), 1000u);
diff --git a/src/mongo/s/write_ops/mock_ns_targeter.h b/src/mongo/s/write_ops/mock_ns_targeter.h
index 44bc5be18c2..a430e3caa7d 100644
--- a/src/mongo/s/write_ops/mock_ns_targeter.h
+++ b/src/mongo/s/write_ops/mock_ns_targeter.h
@@ -85,7 +85,9 @@ public:
/**
* Returns a ShardEndpoint for the doc from the mock ranges
*/
- Status targetInsert(OperationContext* txn, const BSONObj& doc, ShardEndpoint** endpoint) const {
+ Status targetInsert(OperationContext* opCtx,
+ const BSONObj& doc,
+ ShardEndpoint** endpoint) const {
std::vector<ShardEndpoint*> endpoints;
Status status = targetQuery(doc, &endpoints);
if (!status.isOK())
@@ -99,7 +101,7 @@ public:
* Returns the first ShardEndpoint for the query from the mock ranges. Can only handle
* queries of the form { field : { $gte : <value>, $lt : <value> } }.
*/
- Status targetUpdate(OperationContext* txn,
+ Status targetUpdate(OperationContext* opCtx,
const BatchedUpdateDocument& updateDoc,
std::vector<ShardEndpoint*>* endpoints) const {
return targetQuery(updateDoc.getQuery(), endpoints);
@@ -109,7 +111,7 @@ public:
* Returns the first ShardEndpoint for the query from the mock ranges. Can only handle
* queries of the form { field : { $gte : <value>, $lt : <value> } }.
*/
- Status targetDelete(OperationContext* txn,
+ Status targetDelete(OperationContext* opCtx,
const BatchedDeleteDocument& deleteDoc,
std::vector<ShardEndpoint*>* endpoints) const {
return targetQuery(deleteDoc.getQuery(), endpoints);
@@ -140,7 +142,7 @@ public:
// No-op
}
- Status refreshIfNeeded(OperationContext* txn, bool* wasChanged) {
+ Status refreshIfNeeded(OperationContext* opCtx, bool* wasChanged) {
// No-op
if (wasChanged)
*wasChanged = false;
diff --git a/src/mongo/s/write_ops/write_op.cpp b/src/mongo/s/write_ops/write_op.cpp
index 25f9f13b3aa..6f58a58a124 100644
--- a/src/mongo/s/write_ops/write_op.cpp
+++ b/src/mongo/s/write_ops/write_op.cpp
@@ -65,7 +65,7 @@ const WriteErrorDetail& WriteOp::getOpError() const {
return *_error;
}
-Status WriteOp::targetWrites(OperationContext* txn,
+Status WriteOp::targetWrites(OperationContext* opCtx,
const NSTargeter& targeter,
std::vector<TargetedWrite*>* targetedWrites) {
bool isUpdate = _itemRef.getOpType() == BatchedCommandRequest::BatchType_Update;
@@ -77,16 +77,16 @@ Status WriteOp::targetWrites(OperationContext* txn,
vector<ShardEndpoint*>& endpoints = endpointsOwned.mutableVector();
if (isUpdate) {
- targetStatus = targeter.targetUpdate(txn, *_itemRef.getUpdate(), &endpoints);
+ targetStatus = targeter.targetUpdate(opCtx, *_itemRef.getUpdate(), &endpoints);
} else if (isDelete) {
- targetStatus = targeter.targetDelete(txn, *_itemRef.getDelete(), &endpoints);
+ targetStatus = targeter.targetDelete(opCtx, *_itemRef.getDelete(), &endpoints);
} else {
dassert(_itemRef.getOpType() == BatchedCommandRequest::BatchType_Insert);
ShardEndpoint* endpoint = NULL;
// TODO: Remove the index targeting stuff once there is a command for it
if (!isIndexInsert) {
- targetStatus = targeter.targetInsert(txn, _itemRef.getDocument(), &endpoint);
+ targetStatus = targeter.targetInsert(opCtx, _itemRef.getDocument(), &endpoint);
} else {
// TODO: Retry index writes with stale version?
targetStatus = targeter.targetCollection(&endpoints);
diff --git a/src/mongo/s/write_ops/write_op.h b/src/mongo/s/write_ops/write_op.h
index d56be517fe7..1be62c36d0e 100644
--- a/src/mongo/s/write_ops/write_op.h
+++ b/src/mongo/s/write_ops/write_op.h
@@ -122,7 +122,7 @@ public:
* Returns !OK if the targeting process itself fails
* (no TargetedWrites will be added, state unchanged)
*/
- Status targetWrites(OperationContext* txn,
+ Status targetWrites(OperationContext* opCtx,
const NSTargeter& targeter,
std::vector<TargetedWrite*>* targetedWrites);
diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp
index c54d4f7070a..b780a79a344 100644
--- a/src/mongo/s/write_ops/write_op_test.cpp
+++ b/src/mongo/s/write_ops/write_op_test.cpp
@@ -83,7 +83,7 @@ TEST(WriteOpTests, TargetSingle) {
// Basic targeting test
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
@@ -105,7 +105,7 @@ TEST(WriteOpTests, TargetSingle) {
OwnedPointerVector<TargetedWrite> targetedOwned;
vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites(&txn, targeter, &targeted);
+ Status status = writeOp.targetWrites(&opCtx, targeter, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
@@ -141,7 +141,7 @@ TEST(WriteOpTests, TargetMultiOneShard) {
// Multi-write targeting test where our query goes to one shard
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion(10, 0, OID()));
@@ -167,7 +167,7 @@ TEST(WriteOpTests, TargetMultiOneShard) {
OwnedPointerVector<TargetedWrite> targetedOwned;
vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites(&txn, targeter, &targeted);
+ Status status = writeOp.targetWrites(&opCtx, targeter, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
@@ -184,7 +184,7 @@ TEST(WriteOpTests, TargetMultiAllShards) {
// Multi-write targeting test where our write goes to more than one shard
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion(10, 0, OID()));
@@ -211,7 +211,7 @@ TEST(WriteOpTests, TargetMultiAllShards) {
OwnedPointerVector<TargetedWrite> targetedOwned;
vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites(&txn, targeter, &targeted);
+ Status status = writeOp.targetWrites(&opCtx, targeter, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
@@ -236,7 +236,7 @@ TEST(WriteOpTests, ErrorSingle) {
// Single error after targeting test
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
@@ -258,7 +258,7 @@ TEST(WriteOpTests, ErrorSingle) {
OwnedPointerVector<TargetedWrite> targetedOwned;
vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites(&txn, targeter, &targeted);
+ Status status = writeOp.targetWrites(&opCtx, targeter, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
@@ -282,7 +282,7 @@ TEST(WriteOpTests, CancelSingle) {
// Cancel single targeting test
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
@@ -304,7 +304,7 @@ TEST(WriteOpTests, CancelSingle) {
OwnedPointerVector<TargetedWrite> targetedOwned;
vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites(&txn, targeter, &targeted);
+ Status status = writeOp.targetWrites(&opCtx, targeter, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);
@@ -325,7 +325,7 @@ TEST(WriteOpTests, RetrySingleOp) {
// Retry single targeting test
//
- OperationContextNoop txn;
+ OperationContextNoop opCtx;
NamespaceString nss("foo.bar");
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED());
@@ -347,7 +347,7 @@ TEST(WriteOpTests, RetrySingleOp) {
OwnedPointerVector<TargetedWrite> targetedOwned;
vector<TargetedWrite*>& targeted = targetedOwned.mutableVector();
- Status status = writeOp.targetWrites(&txn, targeter, &targeted);
+ Status status = writeOp.targetWrites(&opCtx, targeter, &targeted);
ASSERT(status.isOK());
ASSERT_EQUALS(writeOp.getWriteState(), WriteOpState_Pending);