diff options
author:    Andy Schwerin <schwerin@mongodb.com>  2016-09-06 16:57:35 -0400
committer: Andy Schwerin <schwerin@mongodb.com>  2016-09-07 09:26:50 -0400
commit:    645a77b3fa5b28d29d245e30cc195fd5a8eda049 (patch)
tree:      d91a9bc7ed7012e753ec8d92859f7f342a994f86 /src/mongo/s
parent:    1f389ce467330cda1171d2a04bd0e0b2890aaf8d (diff)
download:  mongo-645a77b3fa5b28d29d245e30cc195fd5a8eda049.tar.gz
SERVER-24600 Increase interruptibility of RemoteCommandTargeter::findHost.
By making more calls of RemoteCommandTargeter::findHost interruptible, this
change speeds up the shutdown of mongos when no config servers are discoverable.
Diffstat (limited to 'src/mongo/s')
-rw-r--r--  src/mongo/s/balancer/migration_manager.cpp                    | 3
-rw-r--r--  src/mongo/s/catalog/replset/sharding_catalog_manager_impl.cpp | 7
-rw-r--r--  src/mongo/s/client/shard_remote.cpp                           | 6
-rw-r--r--  src/mongo/s/commands/cluster_get_last_error_cmd.cpp           | 2
-rw-r--r--  src/mongo/s/commands/cluster_write_cmd.cpp                    | 2
-rw-r--r--  src/mongo/s/query/async_results_merger.cpp                    | 3
-rw-r--r--  src/mongo/s/server.cpp                                        | 4
-rw-r--r--  src/mongo/s/write_ops/batch_write_exec.cpp                    | 2
8 files changed, 14 insertions, 15 deletions
diff --git a/src/mongo/s/balancer/migration_manager.cpp b/src/mongo/s/balancer/migration_manager.cpp index b73199ec848..ba4a488ffcb 100644 --- a/src/mongo/s/balancer/migration_manager.cpp +++ b/src/mongo/s/balancer/migration_manager.cpp @@ -327,8 +327,7 @@ shared_ptr<Notification<Status>> MigrationManager::_schedule( const auto fromShard = fromShardStatus.getValue(); auto fromHostStatus = - fromShard->getTargeter()->findHost(ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - RemoteCommandTargeter::selectFindHostMaxWaitTime(txn)); + fromShard->getTargeter()->findHost(txn, ReadPreferenceSetting{ReadPreference::PrimaryOnly}); if (!fromHostStatus.isOK()) { return std::make_shared<Notification<Status>>(std::move(fromHostStatus.getStatus())); } diff --git a/src/mongo/s/catalog/replset/sharding_catalog_manager_impl.cpp b/src/mongo/s/catalog/replset/sharding_catalog_manager_impl.cpp index f58c323fd12..88c7d4c482f 100644 --- a/src/mongo/s/catalog/replset/sharding_catalog_manager_impl.cpp +++ b/src/mongo/s/catalog/replset/sharding_catalog_manager_impl.cpp @@ -325,8 +325,7 @@ StatusWith<Shard::CommandResponse> ShardingCatalogManagerImpl::_runCommandForAdd RemoteCommandTargeter* targeter, const std::string& dbName, const BSONObj& cmdObj) { - auto host = targeter->findHost(ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - RemoteCommandTargeter::selectFindHostMaxWaitTime(txn)); + auto host = targeter->findHost(txn, ReadPreferenceSetting{ReadPreference::PrimaryOnly}); if (!host.isOK()) { return host.getStatus(); } @@ -1712,8 +1711,8 @@ void ShardingCatalogManagerImpl::_scheduleAddShardTask( // Schedule the shardIdentity upsert request to run immediately, and track the handle. 
- auto swHost = targeter->findHost(ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - Milliseconds(kDefaultFindHostMaxWaitTime)); + auto swHost = targeter->findHostWithMaxWait(ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + Milliseconds(kDefaultFindHostMaxWaitTime)); if (!swHost.isOK()) { // A 3.2 mongos must have previously successfully communicated with hosts in this shard, // so a failure to find a host here is probably transient, and it is safe to retry. diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp index 3dbae46be66..200f3fc30ad 100644 --- a/src/mongo/s/client/shard_remote.cpp +++ b/src/mongo/s/client/shard_remote.cpp @@ -176,8 +176,7 @@ Shard::HostWithResponse ShardRemote::_runCommand(OperationContext* txn, if (getId() == "config") { readPrefWithMinOpTime.minOpTime = grid.configOpTime(); } - const auto host = _targeter->findHost(readPrefWithMinOpTime, - RemoteCommandTargeter::selectFindHostMaxWaitTime(txn)); + const auto host = _targeter->findHost(txn, readPrefWithMinOpTime); if (!host.isOK()) { return Shard::HostWithResponse(boost::none, host.getStatus()); } @@ -244,8 +243,7 @@ StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig( ReadPreferenceSetting readPrefWithMinOpTime(readPref); readPrefWithMinOpTime.minOpTime = grid.configOpTime(); - const auto host = _targeter->findHost(readPrefWithMinOpTime, - RemoteCommandTargeter::selectFindHostMaxWaitTime(txn)); + const auto host = _targeter->findHost(txn, readPrefWithMinOpTime); if (!host.isOK()) { return host.getStatus(); } diff --git a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp index 56bfa66b369..dbeb35a673e 100644 --- a/src/mongo/s/commands/cluster_get_last_error_cmd.cpp +++ b/src/mongo/s/commands/cluster_get_last_error_cmd.cpp @@ -117,7 +117,7 @@ public: break; } auto shard = shardStatus.getValue(); - auto swHostAndPort = shard->getTargeter()->findHost(readPref); + auto 
swHostAndPort = shard->getTargeter()->findHostNoWait(readPref); if (!swHostAndPort.isOK()) { status = swHostAndPort.getStatus(); break; diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp index ea63a7d020a..925edaddf62 100644 --- a/src/mongo/s/commands/cluster_write_cmd.cpp +++ b/src/mongo/s/commands/cluster_write_cmd.cpp @@ -271,7 +271,7 @@ private: if (!shardStatus.isOK()) { return shardStatus.getStatus(); } - auto swHostAndPort = shardStatus.getValue()->getTargeter()->findHost(readPref); + auto swHostAndPort = shardStatus.getValue()->getTargeter()->findHostNoWait(readPref); if (!swHostAndPort.isOK()) { return swHostAndPort.getStatus(); } diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp index 85cc8454dd4..c95a0fe2482 100644 --- a/src/mongo/s/query/async_results_merger.cpp +++ b/src/mongo/s/query/async_results_merger.cpp @@ -685,8 +685,7 @@ Status AsyncResultsMerger::RemoteCursorData::resolveShardIdToHostAndPort( } // TODO: Pass down an OperationContext* to use here. - auto findHostStatus = shard->getTargeter()->findHost( - readPref, RemoteCommandTargeter::selectFindHostMaxWaitTime(nullptr)); + auto findHostStatus = shard->getTargeter()->findHostWithMaxWait(readPref, Seconds{20}); if (!findHostStatus.isOK()) { return findHostStatus.getStatus(); } diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp index cd98e0cc6dc..79b149de6d7 100644 --- a/src/mongo/s/server.cpp +++ b/src/mongo/s/server.cpp @@ -131,6 +131,7 @@ static ExitCode initService(); // prior execution of mongo initializers or the existence of threads. 
static void cleanupTask() { { + auto serviceContext = getGlobalServiceContext(); Client::initThreadIfNotAlready(); Client& client = cc(); ServiceContext::UniqueOperationContext uniqueTxn; @@ -140,6 +141,9 @@ static void cleanupTask() { txn = uniqueTxn.get(); } + if (serviceContext) + serviceContext->setKillAllOperations(); + auto cursorManager = grid.getCursorManager(); cursorManager->shutdown(); grid.getExecutorPool()->shutdownAndJoin(); diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp index 34f399267f4..6a6395eea13 100644 --- a/src/mongo/s/write_ops/batch_write_exec.cpp +++ b/src/mongo/s/write_ops/batch_write_exec.cpp @@ -190,7 +190,7 @@ void BatchWriteExec::executeBatch(OperationContext* txn, } else { auto shard = shardStatus.getValue(); - auto swHostAndPort = shard->getTargeter()->findHost(readPref); + auto swHostAndPort = shard->getTargeter()->findHostNoWait(readPref); if (!swHostAndPort.isOK()) { // Record a resolve failure |