Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/commands/dbcommands.cpp          |   1
-rw-r--r--  src/mongo/db/commands/getmore_cmd.cpp         |   7
-rw-r--r--  src/mongo/db/commands/mr.cpp                  |   3
-rw-r--r--  src/mongo/db/db.cpp                           | 100
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp          |   1
-rw-r--r--  src/mongo/db/read_concern.cpp                 |   1
-rw-r--r--  src/mongo/db/s/migration_source_manager.cpp   |   1
-rw-r--r--  src/mongo/db/s/operation_sharding_state.cpp   |  21
-rw-r--r--  src/mongo/db/s/operation_sharding_state.h     |  25
-rw-r--r--  src/mongo/db/s/set_shard_version_command.cpp  | 101
-rw-r--r--  src/mongo/db/s/sharding_state.cpp             |  11
-rw-r--r--  src/mongo/db/service_entry_point_mongod.cpp   |   3

12 files changed, 102 insertions, 173 deletions
diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp
index ee67ed17ccb..ef4ddac6eb1 100644
--- a/src/mongo/db/commands/dbcommands.cpp
+++ b/src/mongo/db/commands/dbcommands.cpp
@@ -89,7 +89,6 @@
 #include "mongo/db/repl/repl_client_info.h"
 #include "mongo/db/repl/repl_settings.h"
 #include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/s/operation_sharding_state.h"
 #include "mongo/db/server_parameters.h"
 #include "mongo/db/stats/storage_stats.h"
 #include "mongo/db/write_concern.h"
diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp
index 77afd559cb6..f919343bda2 100644
--- a/src/mongo/db/commands/getmore_cmd.cpp
+++ b/src/mongo/db/commands/getmore_cmd.cpp
@@ -52,7 +52,6 @@
 #include "mongo/db/query/plan_summary_stats.h"
 #include "mongo/db/repl/oplog.h"
 #include "mongo/db/repl/replication_coordinator_global.h"
-#include "mongo/db/s/operation_sharding_state.h"
 #include "mongo/db/service_context.h"
 #include "mongo/db/stats/counters.h"
 #include "mongo/db/stats/top.h"
@@ -66,11 +65,12 @@ namespace mongo {
 
 namespace {
+
 MONGO_FP_DECLARE(rsStopGetMoreCmd);
-// Failpoint for making getMore not wait for an awaitdata cursor. Allows us to avoid waiting during
+
+// Failpoint for making getMore not wait for an awaitdata cursor. Allows us to avoid waiting during
 // tests.
 MONGO_FP_DECLARE(disableAwaitDataForGetMoreCmd);
-}  // namespace
 
 /**
  * A command for running getMore() against an existing cursor registered with a CursorManager.
@@ -482,4 +482,5 @@ public:
 
 } getMoreCmd;
 
+}  // namespace
 }  // namespace mongo
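Note: the getmore_cmd.cpp hunks above only widen the anonymous namespace so that the fail points and the getMoreCmd instance all get internal linkage; the fail point behaviour itself does not change. As a rough standalone illustration of that file-local test-toggle idea (plain C++ with invented names, not MongoDB's FailPoint machinery; real fail points are declared with MONGO_FP_DECLARE and are typically flipped from tests via the configureFailPoint command):

    #include <atomic>
    #include <iostream>

    namespace {

    // Stand-in for MONGO_FP_DECLARE(disableAwaitDataForGetMoreCmd): a file-local toggle with
    // internal linkage that tests can flip while the server keeps running.
    std::atomic<bool> disableAwaitDataForGetMore{false};

    bool shouldAwaitData() {
        // Production code consults the toggle exactly at the decision point.
        return !disableAwaitDataForGetMore.load(std::memory_order_relaxed);
    }

    }  // namespace

    int main() {
        std::cout << std::boolalpha << shouldAwaitData() << "\n";  // true
        disableAwaitDataForGetMore = true;                         // what a test would do
        std::cout << shouldAwaitData() << "\n";                    // false
    }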
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index c513ccab1f3..580918250a0 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -63,8 +63,6 @@
 #include "mongo/db/repl/replication_coordinator_global.h"
 #include "mongo/db/s/collection_metadata.h"
 #include "mongo/db/s/collection_sharding_state.h"
-#include "mongo/db/s/operation_sharding_state.h"
-#include "mongo/db/s/sharded_connection_info.h"
 #include "mongo/db/s/sharding_state.h"
 #include "mongo/db/server_options.h"
 #include "mongo/db/service_context.h"
@@ -83,7 +81,6 @@
 
 namespace mongo {
 
-using std::endl;
 using std::set;
 using std::shared_ptr;
 using std::string;
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
index a99c212202e..e1d872e3eb4 100644
--- a/src/mongo/db/db.cpp
+++ b/src/mongo/db/db.cpp
@@ -177,8 +177,6 @@ using std::vector;
 
 using logger::LogComponent;
 
-extern int diagLogging;
-
 namespace {
 
 const NamespaceString startupLogCollectionName("local.startup_log");
@@ -455,7 +453,7 @@ void repairDatabasesAndCheckVersion(OperationContext* opCtx) {
     LOG(1) << "done repairDatabases";
 }
 
-void _initWireSpec() {
+void initWireSpec() {
     WireSpec& spec = WireSpec::instance();
 
     spec.isInternalClient = true;
@@ -466,7 +464,7 @@ MONGO_FP_DECLARE(shutdownAtStartup);
 ExitCode _initAndListen(int listenPort) {
     Client::initThread("initandlisten");
 
-    _initWireSpec();
+    initWireSpec();
     auto globalServiceContext = checked_cast<ServiceContextMongoD*>(getGlobalServiceContext());
 
     globalServiceContext->setFastClockSource(FastClockSourceFactory::create(Milliseconds(10)));
@@ -477,7 +475,8 @@ ExitCode _initAndListen(int listenPort) {
         return std::unique_ptr<DBClientBase>(new DBDirectClient(opCtx));
     });
 
-    const repl::ReplSettings& replSettings = repl::getGlobalReplicationCoordinator()->getSettings();
+    const repl::ReplSettings& replSettings =
+        repl::ReplicationCoordinator::get(globalServiceContext)->getSettings();
 
     {
         ProcessId pid = ProcessId::getCurrent();
@@ -523,8 +522,8 @@ ExitCode _initAndListen(int listenPort) {
     }
 #endif
 
-    // Warn if we detect configurations for multiple registered storage engines in
-    // the same configuration file/environment.
+    // Warn if we detect configurations for multiple registered storage engines in the same
+    // configuration file/environment.
     if (serverGlobalParams.parsedOpts.hasField("storage")) {
         BSONElement storageElement = serverGlobalParams.parsedOpts.getField("storage");
         invariant(storageElement.isABSONObj());
@@ -605,17 +604,17 @@ ExitCode _initAndListen(int listenPort) {
     // Start up health log writer thread.
     HealthLog::get(startupOpCtx.get()).startup();
 
-    uassertStatusOK(getGlobalAuthorizationManager()->initialize(startupOpCtx.get()));
+    auto const globalAuthzManager = AuthorizationManager::get(globalServiceContext);
+    uassertStatusOK(globalAuthzManager->initialize(startupOpCtx.get()));
 
-    /* this is for security on certain platforms (nonce generation) */
+    // This is for security on certain platforms (nonce generation)
    srand((unsigned)(curTimeMicros64()) ^ (unsigned(uintptr_t(&startupOpCtx))));
 
-    AuthorizationManager* globalAuthzManager = getGlobalAuthorizationManager();
     if (globalAuthzManager->shouldValidateAuthSchemaOnStartup()) {
         Status status = verifySystemIndexes(startupOpCtx.get());
         if (!status.isOK()) {
             log() << redact(status);
-            if (status.code() == ErrorCodes::AuthSchemaIncompatible) {
+            if (status == ErrorCodes::AuthSchemaIncompatible) {
                 exitCleanly(EXIT_NEED_UPGRADE);
             } else {
                 quickExit(EXIT_FAILURE);
@@ -633,6 +632,7 @@ ExitCode _initAndListen(int listenPort) {
                   << " but startup could not verify schema version: " << status;
             exitCleanly(EXIT_NEED_UPGRADE);
         }
+
         if (foundSchemaVersion < AuthorizationManager::schemaVersion26Final) {
             log() << "Auth schema version is incompatible: "
                   << "User and role management commands require auth data to have "
@@ -765,6 +765,7 @@ ExitCode _initAndListen(int listenPort) {
     }
 
     globalServiceContext->notifyStartupComplete();
+
 #ifndef _WIN32
     mongo::signalForkSuccess();
 #else
@@ -817,7 +818,7 @@ MONGO_INITIALIZER_GENERAL(ForkServer, ("EndStartupOptionHandling"), ("default"))
  * This function should contain the startup "actions" that we take based on the startup config. It
  * is intended to separate the actions from "storage" and "validation" of our startup configuration.
  */
-static void startupConfigActions(const std::vector<std::string>& args) {
+void startupConfigActions(const std::vector<std::string>& args) {
     // The "command" option is deprecated. For backward compatibility, still support the "run"
     // and "dbppath" command. The "run" command is the same as just running mongod, so just
     // falls through.
@@ -968,44 +969,45 @@ MONGO_INITIALIZER_GENERAL(setSSLManagerType, MONGO_NO_PREREQUISITES, ("SSLManage
 #define __has_feature(x) 0
 #endif
 
-// NOTE: This function may be called at any time after
-// registerShutdownTask is called below. It must not depend on the
-// prior execution of mongo initializers or the existence of threads.
-static void shutdownTask() {
-    auto serviceContext = getGlobalServiceContext();
-
+// NOTE: This function may be called at any time after registerShutdownTask is called below. It must
+// not depend on the prior execution of mongo initializers or the existence of threads.
+void shutdownTask() {
     Client::initThreadIfNotAlready();
-    Client& client = cc();
-    ServiceContext::UniqueOperationContext uniqueTxn;
-    OperationContext* opCtx = client.getOperationContext();
-    if (!opCtx && serviceContext->getGlobalStorageEngine()) {
-        uniqueTxn = client.makeOperationContext();
-        opCtx = uniqueTxn.get();
-    }
+    auto const client = Client::getCurrent();
+    auto const serviceContext = client->getServiceContext();
 
-    log(LogComponent::kNetwork) << "shutdown: going to close listening sockets..." << endl;
+    log(LogComponent::kNetwork) << "shutdown: going to close listening sockets...";
     ListeningSockets::get()->closeAll();
 
-    log(LogComponent::kNetwork) << "shutdown: going to flush diaglog..." << endl;
+    log(LogComponent::kNetwork) << "shutdown: going to flush diaglog...";
     _diaglog.flush();
 
-    if (opCtx) {
+    serviceContext->setKillAllOperations();
+
+    if (serviceContext->getGlobalStorageEngine()) {
+        ServiceContext::UniqueOperationContext uniqueOpCtx;
+        OperationContext* opCtx = client->getOperationContext();
+        if (!opCtx) {
+            uniqueOpCtx = client->makeOperationContext();
+            opCtx = uniqueOpCtx.get();
+        }
+
        if (serverGlobalParams.featureCompatibility.version.load() ==
            ServerGlobalParams::FeatureCompatibility::Version::k34) {
            log(LogComponent::kReplication) << "shutdown: removing all drop-pending collections...";
-            repl::DropPendingCollectionReaper::get(opCtx)->dropCollectionsOlderThan(
-                opCtx, repl::OpTime::max());
+            repl::DropPendingCollectionReaper::get(serviceContext)
+                ->dropCollectionsOlderThan(opCtx, repl::OpTime::max());
 
            // If we are in fCV 3.4, drop the 'checkpointTimestamp' collection so if we downgrade
            // and then upgrade again, we do not trust a stale 'checkpointTimestamp'.
            log(LogComponent::kReplication) << "shutdown: removing checkpointTimestamp collection...";
-            Status status = repl::StorageInterface::get(opCtx)->dropCollection(
-                opCtx,
-                NamespaceString(
-                    repl::ReplicationConsistencyMarkersImpl::kDefaultCheckpointTimestampNamespace));
-
+            Status status =
+                repl::StorageInterface::get(serviceContext)
+                    ->dropCollection(opCtx,
+                                     NamespaceString(repl::ReplicationConsistencyMarkersImpl::
+                                                         kDefaultCheckpointTimestampNamespace));
            if (!status.isOK()) {
                warning(LogComponent::kReplication)
                    << "shutdown: dropping checkpointTimestamp collection failed: "
@@ -1015,22 +1017,19 @@ static void shutdownTask() {
 
    // This can wait a long time while we drain the secondary's apply queue, especially if it is
    // building an index.
-        repl::ReplicationCoordinator::get(opCtx)->shutdown(opCtx);
-    }
+        repl::ReplicationCoordinator::get(serviceContext)->shutdown(opCtx);
 
-    if (serviceContext) {
-        serviceContext->setKillAllOperations();
+        ShardingState::get(serviceContext)->shutDown(opCtx);
+    }
 
-        // Shut down the background periodic task runner.
-        auto runner = serviceContext->getPeriodicRunner();
-        if (runner) {
-            runner->shutdown();
-        }
+    // Shut down the background periodic task runner
+    if (auto runner = serviceContext->getPeriodicRunner()) {
+        runner->shutdown();
     }
 
     ReplicaSetMonitor::shutdown();
 
-    if (auto sr = Grid::get(opCtx)->shardRegistry()) {
+    if (auto sr = Grid::get(serviceContext)->shardRegistry()) {
         sr->shutdown();
     }
 
@@ -1096,10 +1095,6 @@ static void shutdownTask() {
     // Shutdown Full-Time Data Capture
     stopMongoDFTDC();
 
-    if (opCtx) {
-        ShardingState::get(opCtx)->shutDown(opCtx);
-    }
-
     HealthLog::get(serviceContext).shutdown();
 
     // We should always be able to acquire the global lock at shutdown.
@@ -1120,8 +1115,7 @@ static void shutdownTask() {
     invariant(LOCK_OK == result);
 
     // Global storage engine may not be started in all cases before we exit
-
-    if (serviceContext && serviceContext->getGlobalStorageEngine()) {
+    if (serviceContext->getGlobalStorageEngine()) {
         serviceContext->shutdownGlobalStorageEngineCleanly();
     }
 
@@ -1130,9 +1124,9 @@ static void shutdownTask() {
     // the memory and makes leak sanitizer happy.
     ScriptEngine::dropScopeCache();
 
-    log(LogComponent::kControl) << "now exiting" << endl;
+    log(LogComponent::kControl) << "now exiting";
 
-    audit::logShutdown(&cc());
+    audit::logShutdown(client);
 }
 
 }  // namespace
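Note: the db.cpp shutdownTask() rewrite above stops reaching for globals (cc(), getGlobalServiceContext()), kills in-flight operations via setKillAllOperations() before touching storage, and only materializes an OperationContext when the shutdown thread does not already own one. A minimal standalone sketch of that last reuse-or-create pattern follows; the Client and OperationContext types here are simplified stand-ins, not the real classes:

    #include <iostream>
    #include <memory>

    struct OperationContext {
        const char* owner;
    };

    class Client {
    public:
        // Returns the operation context already attached to this thread, if any.
        OperationContext* getOperationContext() {
            return _opCtx.get();
        }

        // Hands back a fresh, caller-owned operation context (simplified: the real
        // makeOperationContext() also registers it with the client).
        std::unique_ptr<OperationContext> makeOperationContext() {
            return std::make_unique<OperationContext>(OperationContext{"shutdown"});
        }

    private:
        std::unique_ptr<OperationContext> _opCtx;  // empty unless an operation is already running
    };

    void runShutdownWork(Client* client) {
        std::unique_ptr<OperationContext> uniqueOpCtx;
        OperationContext* opCtx = client->getOperationContext();
        if (!opCtx) {
            uniqueOpCtx = client->makeOperationContext();  // scoped: destroyed when we return
            opCtx = uniqueOpCtx.get();
        }
        std::cout << "running shutdown work as " << opCtx->owner << "\n";
    }

    int main() {
        Client client;
        runShutdownWork(&client);
    }

Keeping the temporary context in a scoped owner means it goes away as soon as the shutdown work returns, while an operation context that was already attached to the thread is left untouched.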
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index 8285b84e870..0405d78e1b8 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -65,7 +65,6 @@
 #include "mongo/db/query/plan_summary_stats.h"
 #include "mongo/db/query/query_planner.h"
 #include "mongo/db/s/collection_sharding_state.h"
-#include "mongo/db/s/sharded_connection_info.h"
 #include "mongo/db/s/sharding_state.h"
 #include "mongo/db/service_context.h"
 #include "mongo/db/stats/fill_locker_info.h"
diff --git a/src/mongo/db/read_concern.cpp b/src/mongo/db/read_concern.cpp
index b0266a169af..4650a5244a7 100644
--- a/src/mongo/db/read_concern.cpp
+++ b/src/mongo/db/read_concern.cpp
@@ -44,7 +44,6 @@
 #include "mongo/db/repl/read_concern_args.h"
 #include "mongo/db/repl/repl_client_info.h"
 #include "mongo/db/repl/replication_coordinator.h"
-#include "mongo/db/s/operation_sharding_state.h"
 #include "mongo/db/s/sharding_state.h"
 #include "mongo/db/server_options.h"
 #include "mongo/db/server_parameters.h"
diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp
index 95616ee7a18..0e8b9d8355d 100644
--- a/src/mongo/db/s/migration_source_manager.cpp
+++ b/src/mongo/db/s/migration_source_manager.cpp
@@ -39,7 +39,6 @@
 #include "mongo/db/s/collection_sharding_state.h"
 #include "mongo/db/s/migration_chunk_cloner_source_legacy.h"
 #include "mongo/db/s/migration_util.h"
-#include "mongo/db/s/operation_sharding_state.h"
 #include "mongo/db/s/sharding_state.h"
 #include "mongo/db/s/sharding_state_recovery.h"
 #include "mongo/s/catalog/sharding_catalog_client.h"
diff --git a/src/mongo/db/s/operation_sharding_state.cpp b/src/mongo/db/s/operation_sharding_state.cpp
index 4b8e6304111..f465cc22a24 100644
--- a/src/mongo/db/s/operation_sharding_state.cpp
+++ b/src/mongo/db/s/operation_sharding_state.cpp
@@ -130,25 +130,4 @@ void OperationShardingState::_clear() {
     _ns = NamespaceString();
 }
 
-OperationShardingState::IgnoreVersioningBlock::IgnoreVersioningBlock(OperationContext* opCtx,
-                                                                     const NamespaceString& ns)
-    : _opCtx(opCtx), _ns(ns) {
-    auto& oss = OperationShardingState::get(opCtx);
-    _hadOriginalVersion = oss._hasVersion;
-    if (_hadOriginalVersion) {
-        _originalVersion = oss.getShardVersion(ns);
-    }
-    oss.setShardVersion(ns, ChunkVersion::IGNORED());
-}
-
-OperationShardingState::IgnoreVersioningBlock::~IgnoreVersioningBlock() {
-    auto& oss = OperationShardingState::get(_opCtx);
-    invariant(ChunkVersion::isIgnoredVersion(oss.getShardVersion(_ns)));
-    if (_hadOriginalVersion) {
-        oss.setShardVersion(_ns, _originalVersion);
-    } else {
-        oss._clear();
-    }
-}
-
 }  // namespace mongo
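Note: the operation_sharding_state.cpp hunk above deletes the implementation of IgnoreVersioningBlock, an RAII guard that forced the shard version to IGNORED for a scope and restored the previous value on destruction (the matching header removal follows below). As a generic, standalone illustration of that set-and-restore idiom, not MongoDB's class, and with ScopedOverride as an invented name:

    #include <cassert>
    #include <utility>

    // Generic set-and-restore guard: overrides a value for the lifetime of a scope and puts the
    // previous value back on destruction. The removed IgnoreVersioningBlock followed this shape,
    // specialized to OperationShardingState's per-namespace shard version.
    template <typename T>
    class ScopedOverride {
    public:
        ScopedOverride(T& target, T newValue) : _target(target), _saved(std::move(target)) {
            _target = std::move(newValue);
        }

        ~ScopedOverride() {
            _target = std::move(_saved);  // restore on scope exit, even if an exception unwinds
        }

        ScopedOverride(const ScopedOverride&) = delete;
        ScopedOverride& operator=(const ScopedOverride&) = delete;

    private:
        T& _target;
        T _saved;
    };

    int main() {
        int shardVersionChecks = 2;  // pretend 2 means "fully checked"
        {
            ScopedOverride<int> ignore(shardVersionChecks, 0);  // 0 stands in for IGNORED
            assert(shardVersionChecks == 0);
        }
        assert(shardVersionChecks == 2);  // original value is back
    }

Destructor-based restoration is what made the original block safe to use around sub-operations that might throw.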
diff --git a/src/mongo/db/s/operation_sharding_state.h b/src/mongo/db/s/operation_sharding_state.h
index d4a0e778af1..53ed73f89af 100644
--- a/src/mongo/db/s/operation_sharding_state.h
+++ b/src/mongo/db/s/operation_sharding_state.h
@@ -32,8 +32,8 @@
 
 #include "mongo/base/disallow_copying.h"
 #include "mongo/db/namespace_string.h"
-#include "mongo/db/s/migration_source_manager.h"
 #include "mongo/s/chunk_version.h"
+#include "mongo/util/concurrency/notification.h"
 
 namespace mongo {
 
@@ -51,8 +51,6 @@ class OperationShardingState {
     MONGO_DISALLOW_COPYING(OperationShardingState);
 
 public:
-    class IgnoreVersioningBlock;
-
     OperationShardingState();
 
     /**
@@ -129,25 +127,4 @@ private:
     std::shared_ptr<Notification<void>> _migrationCriticalSectionSignal;
 };
 
-/**
- * RAII type that sets the shard version for the current operation to IGNORED in its constructor,
- * then restores the original version in its destructor. Used for temporarily disabling shard
- * version checking for certain operations, such as multi-updates, that need to be unversioned
- * but may be part of a larger group of operations with a single OperationContext where the other
- * sub-operations might still require versioning.
- */
-class OperationShardingState::IgnoreVersioningBlock {
-    MONGO_DISALLOW_COPYING(IgnoreVersioningBlock);
-
-public:
-    IgnoreVersioningBlock(OperationContext* opCtx, const NamespaceString& ns);
-    ~IgnoreVersioningBlock();
-
-private:
-    OperationContext* _opCtx;
-    NamespaceString _ns;
-    ChunkVersion _originalVersion;
-    bool _hadOriginalVersion;
-};
-
 }  // namespace mongo
diff --git a/src/mongo/db/s/set_shard_version_command.cpp b/src/mongo/db/s/set_shard_version_command.cpp
index 929332c29d9..561b13b335d 100644
--- a/src/mongo/db/s/set_shard_version_command.cpp
+++ b/src/mongo/db/s/set_shard_version_command.cpp
@@ -97,7 +97,7 @@ public:
                 "can't issue setShardVersion from 'eval'",
                 !opCtx->getClient()->isInDirectClient());
 
-        auto shardingState = ShardingState::get(opCtx);
+        auto const shardingState = ShardingState::get(opCtx);
         uassertStatusOK(shardingState->canAcceptShardedCommands());
 
         // Steps
@@ -158,8 +158,8 @@ public:
         // Step 3
 
         // Validate shardName parameter.
-        string shardName = cmdObj["shard"].str();
-        auto storedShardName = ShardingState::get(opCtx)->getShardName();
+        const auto shardName = cmdObj["shard"].str();
+        const auto storedShardName = ShardingState::get(opCtx)->getShardName();
         uassert(ErrorCodes::BadValue,
                 str::stream() << "received shardName " << shardName
                               << " which differs from stored shardName "
                               << storedShardName,
                 storedShardName == shardName);
 
         // Validate config connection string parameter.
+        const auto configdb = cmdObj["configdb"].String();
+        uassert(ErrorCodes::BadValue,
+                "Config server connection string cannot be empty",
+                !configdb.empty());
 
-        const auto configdb = cmdObj["configdb"].str();
-        if (configdb.size() == 0) {
-            errmsg = "no configdb";
-            return false;
-        }
-
-        auto givenConnStrStatus = ConnectionString::parse(configdb);
-        uassertStatusOK(givenConnStrStatus);
-
-        const auto& givenConnStr = givenConnStrStatus.getValue();
-        if (givenConnStr.type() != ConnectionString::SET) {
-            errmsg = str::stream() << "given config server string is not of type SET";
-            return false;
-        }
-
-        ConnectionString storedConnStr = ShardingState::get(opCtx)->getConfigServer(opCtx);
-        if (givenConnStr.getSetName() != storedConnStr.getSetName()) {
-            errmsg = str::stream()
-                << "given config server set name: " << givenConnStr.getSetName()
-                << " differs from known set name: " << storedConnStr.getSetName();
+        const auto givenConnStr = uassertStatusOK(ConnectionString::parse(configdb));
+        uassert(ErrorCodes::InvalidOptions,
+                str::stream() << "Given config server string " << givenConnStr.toString()
+                              << " is not of type SET",
+                givenConnStr.type() == ConnectionString::SET);
 
-            return false;
-        }
+        const auto storedConnStr = ShardingState::get(opCtx)->getConfigServer(opCtx);
+        uassert(ErrorCodes::IllegalOperation,
+                str::stream() << "Given config server set name: " << givenConnStr.getSetName()
+                              << " differs from known set name: "
+                              << storedConnStr.getSetName(),
+                givenConnStr.getSetName() == storedConnStr.getSetName());
 
         // Validate namespace parameter.
-
-        const string ns = cmdObj["setShardVersion"].valuestrsafe();
-        if (ns.size() == 0) {
-            errmsg = "need to specify namespace";
-            return false;
-        }
-
-        // Backwards compatibility for SERVER-23119
-        const NamespaceString nss(ns);
-        if (!nss.isValid()) {
-            warning() << "Invalid namespace used for setShardVersion: " << ns;
-            return true;
-        }
+        const NamespaceString nss(cmdObj["setShardVersion"].String());
+        uassert(ErrorCodes::InvalidNamespace,
+                str::stream() << "Invalid namespace " << nss.ns(),
+                nss.isValid());
 
         // Validate chunk version parameter.
         const ChunkVersion requestedVersion =
@@ -213,7 +197,7 @@ public:
 
         // Step 4
 
-        const ChunkVersion connectionVersion = info->getVersion(ns);
+        const ChunkVersion connectionVersion = info->getVersion(nss.ns());
         connectionVersion.addToBSON(result, "oldVersion");
 
         {
@@ -252,15 +236,15 @@ public:
                 // A migration occurred.
                 if (connectionVersion < collectionShardVersion &&
                     connectionVersion.epoch() == collectionShardVersion.epoch()) {
-                    info->setVersion(ns, requestedVersion);
+                    info->setVersion(nss.ns(), requestedVersion);
                 }
 
                 // The collection was dropped and recreated.
                else if (authoritative) {
-                    info->setVersion(ns, requestedVersion);
+                    info->setVersion(nss.ns(), requestedVersion);
                 } else {
-                    result.append("ns", ns);
+                    result.append("ns", nss.ns());
                     result.appendBool("need_authoritative", true);
-                    errmsg = "verifying drop on '" + ns + "'";
+                    errmsg = str::stream() << "verifying drop on '" << nss.ns() << "'";
                     return false;
                 }
             }
@@ -276,7 +260,7 @@ public:
         if (isDropRequested) {
             if (!authoritative) {
                 result.appendBool("need_authoritative", true);
-                result.append("ns", ns);
+                result.append("ns", nss.ns());
                 collectionShardVersion.addToBSON(result, "globalVersion");
                 errmsg = "dropping needs to be authoritative";
                 return false;
@@ -292,8 +276,8 @@ public:
         if (requestedVersion < connectionVersion &&
             requestedVersion.epoch() == connectionVersion.epoch()) {
             errmsg = str::stream() << "this connection already had a newer version "
-                                   << "of collection '" << ns << "'";
-            result.append("ns", ns);
+                                   << "of collection '" << nss.ns() << "'";
+            result.append("ns", nss.ns());
             requestedVersion.addToBSON(result, "newVersion");
             collectionShardVersion.addToBSON(result, "globalVersion");
             return false;
@@ -315,8 +299,8 @@ public:
             }
 
             errmsg = str::stream() << "shard global version for collection is higher "
-                                   << "than trying to set to '" << ns << "'";
-            result.append("ns", ns);
+                                   << "than trying to set to '" << nss.ns() << "'";
+            result.append("ns", nss.ns());
             requestedVersion.addToBSON(result, "version");
             collectionShardVersion.addToBSON(result, "globalVersion");
             result.appendBool("reloadConfig", true);
@@ -339,9 +323,9 @@ public:
             }
 
             // need authoritative for first look
-            result.append("ns", ns);
+            result.append("ns", nss.ns());
             result.appendBool("need_authoritative", true);
-            errmsg = "first time for collection '" + ns + "'";
+            errmsg = str::stream() << "first time for collection '" << nss.ns() << "'";
             return false;
         }
 
@@ -365,14 +349,15 @@ public:
 
         if (!status.isOK()) {
             // The reload itself was interrupted or confused here
-            errmsg = str::stream()
-                << "could not refresh metadata for " << ns << " with requested shard version "
-                << requestedVersion.toString() << ", stored shard version is "
-                << currVersion.toString() << causedBy(redact(status));
+            errmsg = str::stream() << "could not refresh metadata for " << nss.ns()
+                                   << " with requested shard version "
+                                   << requestedVersion.toString()
+                                   << ", stored shard version is " << currVersion.toString()
+                                   << causedBy(redact(status));
 
             warning() << errmsg;
 
-            result.append("ns", ns);
+            result.append("ns", nss.ns());
             requestedVersion.addToBSON(result, "version");
             currVersion.addToBSON(result, "globalVersion");
             result.appendBool("reloadConfig", true);
@@ -382,7 +367,7 @@ public:
             // We reloaded a version that doesn't match the version mongos was trying to
             // set.
             errmsg = str::stream() << "requested shard version differs from"
-                                   << " config shard version for " << ns
+                                   << " config shard version for " << nss.ns()
                                    << ", requested version is " << requestedVersion.toString()
                                    << " but found version " << currVersion.toString();
 
             // WARNING: the exact fields below are important for compatibility with mongos
             // version reload.
-            result.append("ns", ns);
+            result.append("ns", nss.ns());
             currVersion.addToBSON(result, "globalVersion");
 
             // If this was a reset of a collection or the last chunk moved out, inform mongos to
@@ -411,7 +396,7 @@ public:
             }
         }
 
-        info->setVersion(ns, requestedVersion);
+        info->setVersion(nss.ns(), requestedVersion);
 
         return true;
     }
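Note: the set_shard_version_command.cpp hunks above swap the errmsg-and-return-false reporting style for uassert(), which throws an exception carrying an error code plus message that the command dispatch layer turns into the failed response. A simplified standalone sketch of that control flow; uassertLike and this AssertionException are illustrative stand-ins, not the real MongoDB assertion utilities:

    #include <iostream>
    #include <stdexcept>
    #include <string>

    enum class ErrorCode { BadValue, InvalidNamespace };

    // Exception type carrying a code alongside the message.
    struct AssertionException : std::runtime_error {
        AssertionException(ErrorCode code, const std::string& message)
            : std::runtime_error(message), code(code) {}
        ErrorCode code;
    };

    // Stand-in for uassert(): throw when the condition does not hold.
    void uassertLike(ErrorCode code, const std::string& message, bool expression) {
        if (!expression)
            throw AssertionException(code, message);
    }

    // The command body just states its preconditions; the dispatch layer catches the
    // exception and turns it into the errmsg/failed response the client sees.
    bool runSetShardVersionLike(const std::string& configdb, std::string* errmsg) {
        try {
            uassertLike(ErrorCode::BadValue,
                        "Config server connection string cannot be empty",
                        !configdb.empty());
            return true;
        } catch (const AssertionException& ex) {
            *errmsg = ex.what();
            return false;
        }
    }

    int main() {
        std::string errmsg;
        bool ok = runSetShardVersionLike("", &errmsg);
        std::cout << (ok ? "ok" : errmsg) << "\n";  // prints the assertion message
    }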
-            result.append("ns", ns);
+            result.append("ns", nss.ns());
             currVersion.addToBSON(result, "globalVersion");
 
             // If this was a reset of a collection or the last chunk moved out, inform mongos to
@@ -411,7 +396,7 @@ public:
             }
         }
 
-        info->setVersion(ns, requestedVersion);
+        info->setVersion(nss.ns(), requestedVersion);
 
         return true;
     }
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index b775ca6ae0b..f091aec2d48 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -165,8 +165,8 @@ string ShardingState::getShardName() {
 void ShardingState::shutDown(OperationContext* opCtx) {
     stdx::unique_lock<stdx::mutex> lk(_mutex);
     if (enabled()) {
-        grid.getExecutorPool()->shutdownAndJoin();
-        grid.catalogClient()->shutDown(opCtx);
+        Grid::get(opCtx)->getExecutorPool()->shutdownAndJoin();
+        Grid::get(opCtx)->catalogClient()->shutDown(opCtx);
     }
 }
 
@@ -184,7 +184,7 @@ Status ShardingState::updateConfigServerOpTimeFromMetadata(OperationContext* opC
             return Status(ErrorCodes::Unauthorized, "Unauthorized to update config opTime");
         }
 
-        grid.advanceConfigOpTime(*opTime);
+        Grid::get(opCtx)->advanceConfigOpTime(*opTime);
     }
 
     return Status::OK();
@@ -307,7 +307,8 @@ Status ShardingState::initializeFromShardIdentity(OperationContext* opCtx,
         invariant(!_shardName.empty());
         fassert(40372, _shardName == shardIdentity.getShardName());
 
-        auto prevConfigsvrConnStr = grid.shardRegistry()->getConfigServerConnectionString();
+        auto prevConfigsvrConnStr =
+            Grid::get(opCtx)->shardRegistry()->getConfigServerConnectionString();
         invariant(prevConfigsvrConnStr.type() == ConnectionString::SET);
         fassert(40373, prevConfigsvrConnStr.getSetName() == configSvrConnStr.getSetName());
 
@@ -549,7 +550,7 @@ void ShardingState::appendInfo(OperationContext* opCtx, BSONObjBuilder& builder)
     stdx::lock_guard<stdx::mutex> lk(_mutex);
 
     builder.append("configServer",
-                   grid.shardRegistry()->getConfigServerConnectionString().toString());
+                   Grid::get(opCtx)->shardRegistry()->getConfigServerConnectionString().toString());
     builder.append("shardName", _shardName);
     builder.append("clusterId", _clusterId);
diff --git a/src/mongo/db/service_entry_point_mongod.cpp b/src/mongo/db/service_entry_point_mongod.cpp
index 3c2c1d89998..58d2ed0dcea 100644
--- a/src/mongo/db/service_entry_point_mongod.cpp
+++ b/src/mongo/db/service_entry_point_mongod.cpp
@@ -1082,14 +1082,13 @@ DbResponse ServiceEntryPointMongod::handleRequest(OperationContext* opCtx, const
             shouldLogOpDebug = true;
         } else {
             if (!opCtx->getClient()->isInDirectClient()) {
-                const ShardedConnectionInfo* connInfo = ShardedConnectionInfo::get(&c, false);
                 uassert(18663,
                         str::stream() << "legacy writeOps not longer supported for "
                                       << "versioned connections, ns: " << nsString.ns()
                                       << ", op: " << networkOpToString(op),
-                        connInfo == NULL);
+                        !ShardedConnectionInfo::get(&c, false));
             }
 
             if (!nsString.isValid()) {
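Note: a recurring theme across these hunks (sharding_state.cpp, db.cpp, service_entry_point_mongod.cpp) is retiring process-wide globals such as `grid` and getGlobalAuthorizationManager() in favour of accessors like Grid::get(opCtx) that reach the service through the operation's ServiceContext. A minimal standalone sketch of that accessor shape is below; MongoDB actually wires this up with ServiceContext::declareDecoration<>(), and the classes here are simplified stand-ins rather than the real ones:

    #include <iostream>

    class ServiceContext;

    class Grid {
    public:
        // Conventional accessor: the service hangs off the context it is retrieved from,
        // so tests can build an isolated context instead of mutating a global.
        static Grid* get(ServiceContext* serviceContext);

        void advanceConfigOpTime(long long opTime) {
            _configOpTime = opTime;
        }
        long long configOpTime() const {
            return _configOpTime;
        }

    private:
        long long _configOpTime = 0;
    };

    class ServiceContext {
    public:
        Grid grid;  // attached directly here for the sketch
    };

    Grid* Grid::get(ServiceContext* serviceContext) {
        return &serviceContext->grid;
    }

    int main() {
        ServiceContext serviceContext;
        Grid::get(&serviceContext)->advanceConfigOpTime(42);
        std::cout << Grid::get(&serviceContext)->configOpTime() << "\n";  // prints 42
    }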