author     Esha Maharishi <esha.maharishi@mongodb.com>    2017-08-08 09:07:08 -0400
committer  Esha Maharishi <esha.maharishi@mongodb.com>    2017-08-08 15:30:56 -0400
commit     a70b206b4d056de139e649e17d7c1ca3a5c79a52 (patch)
tree       de009d0533bf66824190edc4c39977bff4199b70 /src/mongo
parent     7bc94c5f02e9d32b7faca61013f8b70b92c1437f (diff)
download   mongo-a70b206b4d056de139e649e17d7c1ca3a5c79a52.tar.gz
Revert "SERVER-30219 Make metadata commands use kLocalReadConcern"
This reverts commit e101217eabfaa8d10c1c6cce969fa773ead966f2.
Diffstat (limited to 'src/mongo')
19 files changed, 111 insertions, 216 deletions
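The change being reverted has the same shape in every file below: the original SERVER-30219 commit added a trailing repl::ReadConcernLevel parameter (defaulted to kMajorityReadConcern) to the sharding catalog read paths so that metadata commands running on the config server could request kLocalReadConcern; this revert removes that parameter again and fixes the read concern to majority inside the implementations. The following is a minimal, self-contained sketch of that pattern only; ReadConcernLevel, CatalogClientBeforeRevert, CatalogClientAfterRevert, and findOnConfig are simplified stand-ins for illustration, not the real MongoDB types.

#include <iostream>
#include <string>

// Stand-in for repl::ReadConcernLevel (simplified for illustration).
enum class ReadConcernLevel { kLocal, kMajority };

// Stand-in for the underlying config-server read (hypothetical helper).
std::string findOnConfig(const std::string& dbName, ReadConcernLevel level) {
    return dbName + (level == ReadConcernLevel::kLocal ? " (local read)" : " (majority read)");
}

// Shape of the interface before the revert: callers may override the read concern,
// which defaults to majority.
struct CatalogClientBeforeRevert {
    std::string getDatabase(const std::string& dbName,
                            ReadConcernLevel readConcern = ReadConcernLevel::kMajority) {
        return findOnConfig(dbName, readConcern);
    }
};

// Shape after the revert: the parameter is gone and majority is fixed inside the
// implementation, mirroring e.g. ShardingCatalogClientImpl::getDatabase in this diff.
struct CatalogClientAfterRevert {
    std::string getDatabase(const std::string& dbName) {
        return findOnConfig(dbName, ReadConcernLevel::kMajority);
    }
};

int main() {
    CatalogClientBeforeRevert before;
    CatalogClientAfterRevert after;

    // Before the revert, a config-server-local caller could opt into a local read.
    std::cout << before.getDatabase("test", ReadConcernLevel::kLocal) << "\n";

    // After the revert, every caller goes back through the majority read path.
    std::cout << after.getDatabase("test") << "\n";
    return 0;
}

The rest of the diff is the mechanical application of this reversal to the catalog client and its mock, the catalog cache and cache loaders, the balancer configuration, and the config-server metadata commands.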
diff --git a/src/mongo/db/s/config/configsvr_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
index 642380a4a56..227c6b05bc5 100644
--- a/src/mongo/db/s/config/configsvr_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
@@ -131,9 +131,7 @@ public:
         auto const catalogCache = Grid::get(opCtx)->catalogCache();
         auto const shardRegistry = Grid::get(opCtx)->shardRegistry();
 
-        auto dbType = uassertStatusOK(catalogClient->getDatabase(
-                                          opCtx, dbname, repl::ReadConcernLevel::kLocalReadConcern))
-                          .value;
+        auto dbType = uassertStatusOK(catalogClient->getDatabase(opCtx, dbname)).value;
 
         const std::string to = movePrimaryRequest.getTo().toString();
 
@@ -168,13 +166,10 @@ public:
               << " to: " << toShard->toString();
 
         const std::string whyMessage(str::stream() << "Moving primary shard of " << dbname);
-
-        // ReplSetDistLockManager uses local read concern and majority write concern by default.
         auto scopedDistLock = uassertStatusOK(catalogClient->getDistLockManager()->lock(
             opCtx, dbname + "-movePrimary", whyMessage, DistLockManager::kDefaultLockTimeout));
 
-        const auto shardedColls =
-            getAllShardedCollectionsForDb(opCtx, dbname, repl::ReadConcernLevel::kLocalReadConcern);
+        const auto shardedColls = getAllShardedCollectionsForDb(opCtx, dbname);
 
         // Record start in changelog
         uassertStatusOK(catalogClient->logChange(
@@ -222,10 +217,7 @@ public:
 
         // Update the new primary in the config server metadata
         {
-            auto dbt =
-                uassertStatusOK(catalogClient->getDatabase(
-                                    opCtx, dbname, repl::ReadConcernLevel::kLocalReadConcern))
-                    .value;
+            auto dbt = uassertStatusOK(catalogClient->getDatabase(opCtx, dbname)).value;
             dbt.setPrimary(toShard->getId());
             uassertStatusOK(catalogClient->updateDatabase(opCtx, dbname, dbt));
         }
diff --git a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
index 8e5863b4f71..d041d40aabd 100644
--- a/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_shard_collection_command.cpp
@@ -748,11 +748,9 @@ public:
         // Until all metadata commands are on the config server, the CatalogCache on the config
        // server may be stale. Read the database entry directly rather than purging and reloading
        // the database into the CatalogCache, which is very expensive.
-        auto dbType =
-            uassertStatusOK(
-                Grid::get(opCtx)->catalogClient()->getDatabase(
-                    opCtx, nss.db().toString(), repl::ReadConcernLevel::kLocalReadConcern))
-                .value;
+        auto dbType = uassertStatusOK(Grid::get(opCtx)->catalogClient()->getDatabase(
+                                          opCtx, nss.db().toString()))
+                          .value;
         uassert(ErrorCodes::IllegalOperation,
                 str::stream() << "sharding not enabled for db " << nss.db(),
                 dbType.getSharded());
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index 1ac29ac2151..59ee1da26c5 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -346,8 +346,7 @@ void ShardServerCatalogCacheLoader::onStepUp() {
 std::shared_ptr<Notification<void>> ShardServerCatalogCacheLoader::getChunksSince(
     const NamespaceString& nss,
     ChunkVersion version,
-    stdx::function<void(OperationContext*, StatusWith<CollectionAndChangedChunks>)> callbackFn,
-    const repl::ReadConcernLevel& readConcern) {
+    stdx::function<void(OperationContext*, StatusWith<CollectionAndChangedChunks>)> callbackFn) {
     long long currentTerm;
     bool isPrimary;
     {
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.h b/src/mongo/db/s/shard_server_catalog_cache_loader.h
index 6c9da34994e..8eb863329c2 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.h
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.h
@@ -104,9 +104,8 @@ public:
     std::shared_ptr<Notification<void>> getChunksSince(
         const NamespaceString& nss,
         ChunkVersion version,
-        stdx::function<void(OperationContext*, StatusWith<CollectionAndChangedChunks>)> callbackFn,
-        const repl::ReadConcernLevel& readConcern =
-            repl::ReadConcernLevel::kMajorityReadConcern) override;
+        stdx::function<void(OperationContext*, StatusWith<CollectionAndChangedChunks>)> callbackFn)
+        override;
 
 private:
     // Differentiates the server's role in the replica set so that the chunk loader knows whether to
diff --git a/src/mongo/s/balancer_configuration.cpp b/src/mongo/s/balancer_configuration.cpp
index 15efe9e5637..413a39af5b1 100644
--- a/src/mongo/s/balancer_configuration.cpp
+++ b/src/mongo/s/balancer_configuration.cpp
@@ -131,10 +131,9 @@ bool BalancerConfiguration::waitForDelete() const {
     return _balancerSettings.waitForDelete();
 }
 
-Status BalancerConfiguration::refreshAndCheck(OperationContext* opCtx,
-                                              const repl::ReadConcernLevel& readConcern) {
+Status BalancerConfiguration::refreshAndCheck(OperationContext* opCtx) {
     // Balancer configuration
-    Status balancerSettingsStatus = _refreshBalancerSettings(opCtx, readConcern);
+    Status balancerSettingsStatus = _refreshBalancerSettings(opCtx);
     if (!balancerSettingsStatus.isOK()) {
         return {balancerSettingsStatus.code(),
                 str::stream() << "Failed to refresh the balancer settings due to "
@@ -160,12 +159,11 @@ Status BalancerConfiguration::refreshAndCheck(OperationContext* opCtx,
     return Status::OK();
 }
 
-Status BalancerConfiguration::_refreshBalancerSettings(OperationContext* opCtx,
-                                                       const repl::ReadConcernLevel& readConcern) {
+Status BalancerConfiguration::_refreshBalancerSettings(OperationContext* opCtx) {
     BalancerSettingsType settings = BalancerSettingsType::createDefault();
 
-    auto settingsObjStatus = Grid::get(opCtx)->catalogClient()->getGlobalSettings(
-        opCtx, BalancerSettingsType::kKey, readConcern);
+    auto settingsObjStatus =
+        Grid::get(opCtx)->catalogClient()->getGlobalSettings(opCtx, BalancerSettingsType::kKey);
     if (settingsObjStatus.isOK()) {
         auto settingsStatus = BalancerSettingsType::fromBSON(settingsObjStatus.getValue());
         if (!settingsStatus.isOK()) {
diff --git a/src/mongo/s/balancer_configuration.h b/src/mongo/s/balancer_configuration.h
index 9a4de01c9cc..71a3832f537 100644
--- a/src/mongo/s/balancer_configuration.h
+++ b/src/mongo/s/balancer_configuration.h
@@ -33,7 +33,6 @@
 #include <cstdint>
 
 #include "mongo/base/disallow_copying.h"
-#include "mongo/db/repl/read_concern_args.h"
 #include "mongo/platform/atomic_word.h"
 #include "mongo/s/migration_secondary_throttle_options.h"
 #include "mongo/stdx/mutex.h"
@@ -255,18 +254,14 @@ public:
      * This method is thread-safe but it doesn't make sense to be called from more than one thread
      * at a time.
      */
-    Status refreshAndCheck(
-        OperationContext* opCtx,
-        const repl::ReadConcernLevel& readConcern = repl::ReadConcernLevel::kMajorityReadConcern);
+    Status refreshAndCheck(OperationContext* opCtx);
 
 private:
     /**
      * Reloads the balancer configuration from the settings document. Fails if the settings document
      * cannot be read, in which case the values will remain unchanged.
      */
-    Status _refreshBalancerSettings(
-        OperationContext* opCtx,
-        const repl::ReadConcernLevel& readConcern = repl::ReadConcernLevel::kMajorityReadConcern);
+    Status _refreshBalancerSettings(OperationContext* opCtx);
 
     /**
      * Reloads the chunk sizes configuration from the settings document. Fails if the settings
diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h
index 81eb5c0a238..2069a90e5d3 100644
--- a/src/mongo/s/catalog/sharding_catalog_client.h
+++ b/src/mongo/s/catalog/sharding_catalog_client.h
@@ -143,11 +143,8 @@ public:
      * the failure. These are some of the known failures:
      *  - NamespaceNotFound - database does not exist
      */
-    virtual StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(
-        OperationContext* opCtx,
-        const std::string& dbName,
-        const repl::ReadConcernLevel& readConcern =
-            repl::ReadConcernLevel::kMajorityReadConcern) = 0;
+    virtual StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* opCtx,
+                                                                   const std::string& dbName) = 0;
 
     /**
      * Retrieves the metadata for a given collection, if it exists.
@@ -160,10 +157,7 @@ public:
      *  - NamespaceNotFound - collection does not exist
      */
     virtual StatusWith<repl::OpTimeWith<CollectionType>> getCollection(
-        OperationContext* opCtx,
-        const std::string& collNs,
-        const repl::ReadConcernLevel& readConcern =
-            repl::ReadConcernLevel::kMajorityReadConcern) = 0;
+        OperationContext* opCtx, const std::string& collNs) = 0;
 
     /**
      * Retrieves all collections under a specified database (or in the system).
@@ -180,9 +174,7 @@ public:
     virtual Status getCollections(OperationContext* opCtx,
                                   const std::string* dbName,
                                   std::vector<CollectionType>* collections,
-                                  repl::OpTime* optime,
-                                  const repl::ReadConcernLevel& readConcern =
-                                      repl::ReadConcernLevel::kMajorityReadConcern) = 0;
+                                  repl::OpTime* optime) = 0;
 
     /**
     * Drops the specified collection from the collection metadata store.
@@ -315,11 +307,7 @@ public:
     * Returns ErrorCodes::NoMatchingDocument if no such key exists or the BSON content of the
     * setting otherwise.
      */
-    virtual StatusWith<BSONObj> getGlobalSettings(
-        OperationContext* opCtx,
-        StringData key,
-        const repl::ReadConcernLevel& readConcern =
-            repl::ReadConcernLevel::kMajorityReadConcern) = 0;
+    virtual StatusWith<BSONObj> getGlobalSettings(OperationContext* opCtx, StringData key) = 0;
 
     /**
     * Returns the contents of the config.version document - containing the current cluster schema
@@ -360,9 +348,7 @@ public:
     virtual Status insertConfigDocument(OperationContext* opCtx,
                                         const std::string& ns,
                                         const BSONObj& doc,
-                                        const WriteConcernOptions& writeConcern,
-                                        const repl::ReadConcernLevel& readConcern =
-                                            repl::ReadConcernLevel::kMajorityReadConcern) = 0;
+                                        const WriteConcernOptions& writeConcern) = 0;
 
     /**
     * Updates a single document in the specified namespace on the config server. The document must
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
index e246faa1f0c..4e013100eb2 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp
@@ -381,7 +381,7 @@ StatusWith<ShardDrainingStatus> ShardingCatalogClientImpl::removeShard(Operation
 }
 
 StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::getDatabase(
-    OperationContext* opCtx, const std::string& dbName, const repl::ReadConcernLevel& readConcern) {
+    OperationContext* opCtx, const std::string& dbName) {
     if (!NamespaceString::validDBName(dbName, NamespaceString::DollarInDbNameBehavior::Allow)) {
         return {ErrorCodes::InvalidNamespace, stream() << dbName << " is not a valid db name"};
     }
@@ -396,12 +396,12 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::getDatabas
         return repl::OpTimeWith<DatabaseType>(dbt);
     }
 
-    auto result = _fetchDatabaseMetadata(opCtx, dbName, kConfigReadSelector, readConcern);
+    auto result = _fetchDatabaseMetadata(opCtx, dbName, kConfigReadSelector);
     if (result == ErrorCodes::NamespaceNotFound) {
         // If we failed to find the database metadata on the 'nearest' config server, try again
        // against the primary, in case the database was recently created.
         result = _fetchDatabaseMetadata(
-            opCtx, dbName, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, readConcern);
+            opCtx, dbName, ReadPreferenceSetting{ReadPreference::PrimaryOnly});
         if (!result.isOK() && (result != ErrorCodes::NamespaceNotFound)) {
             return {result.getStatus().code(),
                     str::stream() << "Could not confirm non-existence of database " << dbName
@@ -414,15 +414,12 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::getDatabas
 }
 
 StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::_fetchDatabaseMetadata(
-    OperationContext* opCtx,
-    const std::string& dbName,
-    const ReadPreferenceSetting& readPref,
-    const repl::ReadConcernLevel& readConcern) {
+    OperationContext* opCtx, const std::string& dbName, const ReadPreferenceSetting& readPref) {
     dassert(dbName != "admin" && dbName != "config");
 
     auto findStatus = _exhaustiveFindOnConfig(opCtx,
                                               readPref,
-                                              readConcern,
+                                              repl::ReadConcernLevel::kMajorityReadConcern,
                                               NamespaceString(DatabaseType::ConfigNS),
                                               BSON(DatabaseType::name(dbName)),
                                               BSONObj(),
@@ -447,10 +444,10 @@ StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientImpl::_fetchData
 }
 
 StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientImpl::getCollection(
-    OperationContext* opCtx, const std::string& collNs, const repl::ReadConcernLevel& readConcern) {
+    OperationContext* opCtx, const std::string& collNs) {
     auto statusFind = _exhaustiveFindOnConfig(opCtx,
                                               kConfigReadSelector,
-                                              readConcern,
+                                              repl::ReadConcernLevel::kMajorityReadConcern,
                                               NamespaceString(CollectionType::ConfigNS),
                                               BSON(CollectionType::fullNs(collNs)),
                                               BSONObj(),
@@ -485,8 +482,7 @@ StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientImpl::getColle
 Status ShardingCatalogClientImpl::getCollections(OperationContext* opCtx,
                                                  const std::string* dbName,
                                                  std::vector<CollectionType>* collections,
-                                                 OpTime* opTime,
-                                                 const repl::ReadConcernLevel& readConcern) {
+                                                 OpTime* opTime) {
     BSONObjBuilder b;
     if (dbName) {
         invariant(!dbName->empty());
@@ -496,7 +492,7 @@ Status ShardingCatalogClientImpl::getCollections(OperationContext* opCtx,
 
     auto findStatus = _exhaustiveFindOnConfig(opCtx,
                                               kConfigReadSelector,
-                                              readConcern,
+                                              repl::ReadConcernLevel::kMajorityReadConcern,
                                               NamespaceString(CollectionType::ConfigNS),
                                               b.obj(),
                                               BSONObj(),
@@ -720,11 +716,11 @@ Status ShardingCatalogClientImpl::dropCollection(OperationContext* opCtx,
     return Status::OK();
 }
 
-StatusWith<BSONObj> ShardingCatalogClientImpl::getGlobalSettings(
-    OperationContext* opCtx, StringData key, const repl::ReadConcernLevel& readConcern) {
+StatusWith<BSONObj> ShardingCatalogClientImpl::getGlobalSettings(OperationContext* opCtx,
+                                                                 StringData key) {
     auto findStatus = _exhaustiveFindOnConfig(opCtx,
                                               kConfigReadSelector,
-                                              readConcern,
+                                              repl::ReadConcernLevel::kMajorityReadConcern,
                                               kSettingsNamespace,
                                               BSON("_id" << key),
                                               BSONObj(),
@@ -1159,8 +1155,7 @@ void ShardingCatalogClientImpl::writeConfigServerDirect(OperationContext* opCtx,
 Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* opCtx,
                                                        const std::string& ns,
                                                        const BSONObj& doc,
-                                                       const WriteConcernOptions& writeConcern,
-                                                       const repl::ReadConcernLevel& readConcern) {
+                                                       const WriteConcernOptions& writeConcern) {
     const NamespaceString nss(ns);
     invariant(nss.db() == NamespaceString::kAdminDb || nss.db() == NamespaceString::kConfigDb);
 
@@ -1199,7 +1194,7 @@ Status ShardingCatalogClientImpl::insertConfigDocument(OperationContext* opCtx,
             auto fetchDuplicate =
                 _exhaustiveFindOnConfig(opCtx,
                                         ReadPreferenceSetting{ReadPreference::PrimaryOnly},
-                                        readConcern,
+                                        repl::ReadConcernLevel::kMajorityReadConcern,
                                         nss,
                                         idField.wrap(),
                                         BSONObj(),
diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h
index 8d7e8eb6c1d..e184c078fff 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_impl.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h
@@ -89,24 +89,16 @@ public:
     StatusWith<ShardDrainingStatus> removeShard(OperationContext* opCtx,
                                                 const ShardId& name) override;
 
-    StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(
-        OperationContext* opCtx,
-        const std::string& dbName,
-        const repl::ReadConcernLevel& readConcern =
-            repl::ReadConcernLevel::kMajorityReadConcern) override;
+    StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* opCtx,
+                                                           const std::string& dbName) override;
 
-    StatusWith<repl::OpTimeWith<CollectionType>> getCollection(
-        OperationContext* opCtx,
-        const std::string& collNs,
-        const repl::ReadConcernLevel& readConcern =
-            repl::ReadConcernLevel::kMajorityReadConcern) override;
+    StatusWith<repl::OpTimeWith<CollectionType>> getCollection(OperationContext* opCtx,
+                                                               const std::string& collNs) override;
 
     Status getCollections(OperationContext* opCtx,
                           const std::string* dbName,
                           std::vector<CollectionType>* collections,
-                          repl::OpTime* optime,
-                          const repl::ReadConcernLevel& readConcern =
-                              repl::ReadConcernLevel::kMajorityReadConcern) override;
+                          repl::OpTime* optime) override;
 
     Status dropCollection(OperationContext* opCtx, const NamespaceString& ns) override;
 
@@ -148,11 +140,7 @@ public:
                      const WriteConcernOptions& writeConcern,
                      repl::ReadConcernLevel readConcern) override;
 
-    StatusWith<BSONObj> getGlobalSettings(
-        OperationContext* opCtx,
-        StringData key,
-        const repl::ReadConcernLevel& readConcern =
-            repl::ReadConcernLevel::kMajorityReadConcern) override;
+    StatusWith<BSONObj> getGlobalSettings(OperationContext* opCtx, StringData key) override;
 
     StatusWith<VersionType> getConfigVersion(OperationContext* opCtx,
                                              repl::ReadConcernLevel readConcern) override;
@@ -164,9 +152,7 @@ public:
     Status insertConfigDocument(OperationContext* opCtx,
                                 const std::string& ns,
                                 const BSONObj& doc,
-                                const WriteConcernOptions& writeConcern,
-                                const repl::ReadConcernLevel& readConcern =
-                                    repl::ReadConcernLevel::kMajorityReadConcern) override;
+                                const WriteConcernOptions& writeConcern) override;
 
     StatusWith<bool> updateConfigDocument(OperationContext* opCtx,
                                           const std::string& ns,
@@ -255,10 +241,7 @@ private:
      * given read preference. Returns NamespaceNotFound if no database metadata is found.
      */
     StatusWith<repl::OpTimeWith<DatabaseType>> _fetchDatabaseMetadata(
-        OperationContext* opCtx,
-        const std::string& dbName,
-        const ReadPreferenceSetting& readPref,
-        const repl::ReadConcernLevel& readConcern = repl::ReadConcernLevel::kMajorityReadConcern);
+        OperationContext* opCtx, const std::string& dbName, const ReadPreferenceSetting& readPref);
 
     /**
      * Best effort method, which logs diagnostic events on the config server. If the config server
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
index e24b1f5854b..1df6313afec 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp
@@ -78,20 +78,19 @@ Status ShardingCatalogClientMock::updateDatabase(OperationContext* opCtx,
 }
 
 StatusWith<repl::OpTimeWith<DatabaseType>> ShardingCatalogClientMock::getDatabase(
-    OperationContext* opCtx, const string& dbName, const repl::ReadConcernLevel& readConcern) {
+    OperationContext* opCtx, const string& dbName) {
     return {ErrorCodes::InternalError, "Method not implemented"};
 }
 
 StatusWith<repl::OpTimeWith<CollectionType>> ShardingCatalogClientMock::getCollection(
-    OperationContext* opCtx, const string& collNs, const repl::ReadConcernLevel& readConcern) {
+    OperationContext* opCtx, const string& collNs) {
     return {ErrorCodes::InternalError, "Method not implemented"};
 }
 
 Status ShardingCatalogClientMock::getCollections(OperationContext* opCtx,
                                                  const string* dbName,
                                                  vector<CollectionType>* collections,
-                                                 repl::OpTime* optime,
-                                                 const repl::ReadConcernLevel& readConcern) {
+                                                 repl::OpTime* optime) {
     return {ErrorCodes::InternalError, "Method not implemented"};
 }
 
@@ -167,8 +166,8 @@ Status ShardingCatalogClientMock::logChange(OperationContext* opCtx,
     return {ErrorCodes::InternalError, "Method not implemented"};
 }
 
-StatusWith<BSONObj> ShardingCatalogClientMock::getGlobalSettings(
-    OperationContext* opCtx, StringData key, const repl::ReadConcernLevel& readConcern) {
+StatusWith<BSONObj> ShardingCatalogClientMock::getGlobalSettings(OperationContext* opCtx,
+                                                                 StringData key) {
     return {ErrorCodes::InternalError, "Method not implemented"};
 }
 
@@ -184,8 +183,7 @@ void ShardingCatalogClientMock::writeConfigServerDirect(OperationContext* opCtx,
 Status ShardingCatalogClientMock::insertConfigDocument(OperationContext* opCtx,
                                                        const std::string& ns,
                                                        const BSONObj& doc,
-                                                       const WriteConcernOptions& writeConcern,
-                                                       const repl::ReadConcernLevel& readConcern) {
+                                                       const WriteConcernOptions& writeConcern) {
     return {ErrorCodes::InternalError, "Method not implemented"};
 }
 
diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h
index 248c992d60f..6189ad67660 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_mock.h
+++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h
@@ -54,24 +54,16 @@ public:
                           const std::string& dbName,
                           const DatabaseType& db) override;
 
-    StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(
-        OperationContext* opCtx,
-        const std::string& dbName,
-        const repl::ReadConcernLevel& readConcern =
-            repl::ReadConcernLevel::kMajorityReadConcern) override;
+    StatusWith<repl::OpTimeWith<DatabaseType>> getDatabase(OperationContext* opCtx,
+                                                           const std::string& dbName) override;
 
-    StatusWith<repl::OpTimeWith<CollectionType>> getCollection(
-        OperationContext* opCtx,
-        const std::string& collNs,
-        const repl::ReadConcernLevel& readConcern =
-            repl::ReadConcernLevel::kMajorityReadConcern) override;
+    StatusWith<repl::OpTimeWith<CollectionType>> getCollection(OperationContext* opCtx,
+                                                               const std::string& collNs) override;
 
     Status getCollections(OperationContext* opCtx,
                           const std::string* dbName,
                           std::vector<CollectionType>* collections,
-                          repl::OpTime* optime,
-                          const repl::ReadConcernLevel& readConcern =
-                              repl::ReadConcernLevel::kMajorityReadConcern) override;
+                          repl::OpTime* optime) override;
 
     Status dropCollection(OperationContext* opCtx,
                           const NamespaceString& ns) override;
 
@@ -124,11 +116,7 @@ public:
                    const BSONObj& detail,
                    const WriteConcernOptions& writeConcern) override;
 
-    StatusWith<BSONObj> getGlobalSettings(
-        OperationContext* opCtx,
-        StringData key,
-        const repl::ReadConcernLevel& readConcern =
-            repl::ReadConcernLevel::kMajorityReadConcern) override;
+    StatusWith<BSONObj> getGlobalSettings(OperationContext* opCtx, StringData key) override;
 
     StatusWith<VersionType> getConfigVersion(OperationContext* opCtx,
                                              repl::ReadConcernLevel readConcern) override;
@@ -140,9 +128,7 @@ public:
     Status insertConfigDocument(OperationContext* opCtx,
                                 const std::string& ns,
                                 const BSONObj& doc,
-                                const WriteConcernOptions& writeConcern,
-                                const repl::ReadConcernLevel& readConcern =
-                                    repl::ReadConcernLevel::kMajorityReadConcern) override;
+                                const WriteConcernOptions& writeConcern) override;
 
     StatusWith<bool> updateConfigDocument(OperationContext* opCtx,
                                           const std::string& ns,
diff --git a/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp b/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp
index 09b6a611b2a..017ca2f8481 100644
--- a/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_manager_collection_operations.cpp
@@ -84,8 +84,7 @@ ChunkVersion createFirstChunks(OperationContext* opCtx,
                                const ShardKeyPattern& shardKeyPattern,
                                const ShardId& primaryShardId,
                                const std::vector<BSONObj>& initPoints,
-                               const bool distributeInitialChunks,
-                               repl::ReadConcernLevel readConcern) {
+                               const bool distributeInitialChunks) {
     const KeyPattern keyPattern = shardKeyPattern.getKeyPattern();
 
@@ -110,8 +109,7 @@ ChunkVersion createFirstChunks(OperationContext* opCtx,
 
         // Refresh the balancer settings to ensure the chunk size setting, which is sent as part of
        // the splitVector command and affects the number of chunks returned, has been loaded.
-        uassertStatusOK(
-            Grid::get(opCtx)->getBalancerConfiguration()->refreshAndCheck(opCtx, readConcern));
+        uassertStatusOK(Grid::get(opCtx)->getBalancerConfiguration()->refreshAndCheck(opCtx));
 
         if (numObjects > 0) {
             splitPoints = uassertStatusOK(shardutil::selectChunkSplitPoints(
@@ -173,8 +171,7 @@ ChunkVersion createFirstChunks(OperationContext* opCtx,
             opCtx,
             ChunkType::ConfigNS,
             chunk.toConfigBSON(),
-            ShardingCatalogClient::kMajorityWriteConcern,
-            readConcern));
+            ShardingCatalogClient::kMajorityWriteConcern));
     }
 
     return version;
@@ -228,11 +225,7 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
     const auto catalogClient = Grid::get(opCtx)->catalogClient();
     const auto shardRegistry = Grid::get(opCtx)->shardRegistry();
 
-    auto dbEntry =
-        uassertStatusOK(catalogClient->getDatabase(
-                            opCtx, nsToDatabase(ns), repl::ReadConcernLevel::kLocalReadConcern))
-            .value;
-
+    auto dbEntry = uassertStatusOK(catalogClient->getDatabase(opCtx, nsToDatabase(ns))).value;
     auto dbPrimaryShardId = dbEntry.getPrimary();
 
     const auto primaryShard = uassertStatusOK(shardRegistry->getShard(opCtx, dbPrimaryShardId));
@@ -267,13 +260,8 @@ void ShardingCatalogManager::shardCollection(OperationContext* opCtx,
                               ->makeFromBSON(defaultCollation));
     }
 
-    const auto& collVersion = createFirstChunks(opCtx,
-                                                nss,
-                                                fieldsAndOrder,
-                                                dbPrimaryShardId,
-                                                initPoints,
-                                                distributeInitialChunks,
-                                                repl::ReadConcernLevel::kLocalReadConcern);
+    const auto& collVersion = createFirstChunks(
+        opCtx, nss, fieldsAndOrder, dbPrimaryShardId, initPoints, distributeInitialChunks);
 
     {
         CollectionType coll;
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index 26f95a0f635..1cba448d443 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -125,13 +125,11 @@ StatusWith<CachedDatabaseInfo> CatalogCache::getDatabase(OperationContext* opCtx
 }
 
 StatusWith<CachedCollectionRoutingInfo> CatalogCache::getCollectionRoutingInfo(
-    OperationContext* opCtx,
-    const NamespaceString& nss,
-    const repl::ReadConcernLevel& readConcern) {
+    OperationContext* opCtx, const NamespaceString& nss) {
     while (true) {
         std::shared_ptr<DatabaseInfoEntry> dbEntry;
         try {
-            dbEntry = _getDatabase(opCtx, nss.db(), readConcern);
+            dbEntry = _getDatabase(opCtx, nss.db());
         } catch (const DBException& ex) {
             return ex.toStatus();
         }
@@ -163,7 +161,7 @@ StatusWith<CachedCollectionRoutingInfo> CatalogCache::getCollectionRoutingInfo(
             refreshNotification = (collEntry.refreshCompletionNotification =
                                        std::make_shared<Notification<Status>>());
             _scheduleCollectionRefresh_inlock(
-                dbEntry, std::move(collEntry.routingInfo), nss, 1, readConcern);
+                dbEntry, std::move(collEntry.routingInfo), nss, 1);
         }
 
         // Wait on the notification outside of the mutex
@@ -278,8 +276,8 @@ void CatalogCache::purgeAllDatabases() {
     _databases.clear();
 }
 
-std::shared_ptr<CatalogCache::DatabaseInfoEntry> CatalogCache::_getDatabase(
-    OperationContext* opCtx, StringData dbName, const repl::ReadConcernLevel& readConcern) {
+std::shared_ptr<CatalogCache::DatabaseInfoEntry> CatalogCache::_getDatabase(OperationContext* opCtx,
+                                                                            StringData dbName) {
     stdx::lock_guard<stdx::mutex> lg(_mutex);
 
     auto it = _databases.find(dbName);
@@ -292,15 +290,14 @@ std::shared_ptr<CatalogCache::DatabaseInfoEntry> CatalogCache::_getDatabase(
     const auto dbNameCopy = dbName.toString();
 
     // Load the database entry
-    const auto opTimeWithDb =
-        uassertStatusOK(catalogClient->getDatabase(opCtx, dbNameCopy, readConcern));
+    const auto opTimeWithDb = uassertStatusOK(catalogClient->getDatabase(opCtx, dbNameCopy));
     const auto& dbDesc = opTimeWithDb.value;
 
     // Load the sharded collections entries
     std::vector<CollectionType> collections;
     repl::OpTime collLoadConfigOptime;
-    uassertStatusOK(catalogClient->getCollections(
-        opCtx, &dbNameCopy, &collections, &collLoadConfigOptime, readConcern));
+    uassertStatusOK(
+        catalogClient->getCollections(opCtx, &dbNameCopy, &collections, &collLoadConfigOptime));
 
     StringMap<CollectionRoutingInfoEntry> collectionEntries;
     for (const auto& coll : collections) {
@@ -319,15 +316,14 @@ void CatalogCache::_scheduleCollectionRefresh_inlock(
     std::shared_ptr<DatabaseInfoEntry> dbEntry,
     std::shared_ptr<ChunkManager> existingRoutingInfo,
     const NamespaceString& nss,
-    int refreshAttempt,
-    const repl::ReadConcernLevel& readConcern) {
+    int refreshAttempt) {
     Timer t;
 
     const ChunkVersion startingCollectionVersion =
         (existingRoutingInfo ? existingRoutingInfo->getVersion() : ChunkVersion::UNSHARDED());
 
     const auto refreshFailed_inlock =
-        [ this, t, dbEntry, nss, refreshAttempt, readConcern ](const Status& status) noexcept {
+        [ this, t, dbEntry, nss, refreshAttempt ](const Status& status) noexcept {
         log() << "Refresh for collection " << nss << " took " << t.millis() << " ms and failed"
               << causedBy(redact(status));
 
@@ -340,8 +336,7 @@ void CatalogCache::_scheduleCollectionRefresh_inlock(
             // refresh again
             if (status == ErrorCodes::ConflictingOperationInProgress &&
                 refreshAttempt < kMaxInconsistentRoutingInfoRefreshAttempts) {
-                _scheduleCollectionRefresh_inlock(
-                    dbEntry, nullptr, nss, refreshAttempt + 1, readConcern);
+                _scheduleCollectionRefresh_inlock(dbEntry, nullptr, nss, refreshAttempt + 1);
             } else {
                 // Leave needsRefresh to true so that any subsequent get attempts will kick off
                 // another round of refresh
@@ -351,7 +346,7 @@ void CatalogCache::_scheduleCollectionRefresh_inlock(
     };
 
     const auto refreshCallback =
-        [ this, t, dbEntry, nss, existingRoutingInfo, refreshFailed_inlock, readConcern ](
+        [ this, t, dbEntry, nss, existingRoutingInfo, refreshFailed_inlock ](
             OperationContext * opCtx,
             StatusWith<CatalogCacheLoader::CollectionAndChangedChunks> swCollAndChunks) noexcept {
         std::shared_ptr<ChunkManager> newRoutingInfo;
@@ -391,7 +386,7 @@ void CatalogCache::_scheduleCollectionRefresh_inlock(
           << startingCollectionVersion;
 
     try {
-        _cacheLoader.getChunksSince(nss, startingCollectionVersion, refreshCallback, readConcern);
+        _cacheLoader.getChunksSince(nss, startingCollectionVersion, refreshCallback);
     } catch (const DBException& ex) {
         const auto status = ex.toStatus();
 
diff --git a/src/mongo/s/catalog_cache.h b/src/mongo/s/catalog_cache.h
index bb705993f5f..00f3465d4c3 100644
--- a/src/mongo/s/catalog_cache.h
+++ b/src/mongo/s/catalog_cache.h
@@ -76,10 +76,8 @@ public:
      * with the primary shard for the specified database. If an error occurs loading the metadata
     * returns a failed status.
     */
-    StatusWith<CachedCollectionRoutingInfo> getCollectionRoutingInfo(
-        OperationContext* opCtx,
-        const NamespaceString& nss,
-        const repl::ReadConcernLevel& readConcern = repl::ReadConcernLevel::kMajorityReadConcern);
+    StatusWith<CachedCollectionRoutingInfo> getCollectionRoutingInfo(OperationContext* opCtx,
+                                                                     const NamespaceString& nss);
 
     StatusWith<CachedCollectionRoutingInfo> getCollectionRoutingInfo(OperationContext* opCtx,
                                                                      StringData ns);
@@ -157,21 +155,16 @@ private:
     * Ensures that the specified database is in the cache, loading it if necessary. If the database
     * was not in cache, all the sharded collections will be in the 'needsRefresh' state.
     */
-    std::shared_ptr<DatabaseInfoEntry> _getDatabase(
-        OperationContext* opCtx,
-        StringData dbName,
-        const repl::ReadConcernLevel& readConcern = repl::ReadConcernLevel::kMajorityReadConcern);
+    std::shared_ptr<DatabaseInfoEntry> _getDatabase(OperationContext* opCtx, StringData dbName);
 
     /**
     * Non-blocking call which schedules an asynchronous refresh for the specified namespace. The
     * namespace must be in the 'needRefresh' state.
     */
-    void _scheduleCollectionRefresh_inlock(
-        std::shared_ptr<DatabaseInfoEntry> dbEntry,
-        std::shared_ptr<ChunkManager> existingRoutingInfo,
-        const NamespaceString& nss,
-        int refreshAttempt,
-        const repl::ReadConcernLevel& readConcern = repl::ReadConcernLevel::kMajorityReadConcern);
+    void _scheduleCollectionRefresh_inlock(std::shared_ptr<DatabaseInfoEntry> dbEntry,
+                                           std::shared_ptr<ChunkManager> existingRoutingInfo,
+                                           const NamespaceString& nss,
+                                           int refreshAttempt);
 
     // Interface from which chunks will be retrieved
     CatalogCacheLoader& _cacheLoader;
diff --git a/src/mongo/s/catalog_cache_loader.h b/src/mongo/s/catalog_cache_loader.h
index 3b674568a4f..8f2b2d3067d 100644
--- a/src/mongo/s/catalog_cache_loader.h
+++ b/src/mongo/s/catalog_cache_loader.h
@@ -33,7 +33,6 @@
 #include "mongo/base/disallow_copying.h"
 #include "mongo/base/status_with.h"
 #include "mongo/base/string_data.h"
-#include "mongo/db/repl/read_concern_args.h"
 #include "mongo/s/catalog/type_chunk.h"
 #include "mongo/s/catalog/type_collection.h"
 #include "mongo/s/chunk_version.h"
@@ -129,9 +128,8 @@ public:
     virtual std::shared_ptr<Notification<void>> getChunksSince(
         const NamespaceString& nss,
         ChunkVersion version,
-        stdx::function<void(OperationContext*, StatusWith<CollectionAndChangedChunks>)> callbackFn,
-        const repl::ReadConcernLevel& readConcern =
-            repl::ReadConcernLevel::kMajorityReadConcern) = 0;
+        stdx::function<void(OperationContext*, StatusWith<CollectionAndChangedChunks>)>
+            callbackFn) = 0;
 
     /**
     * Only used for unit-tests, clears a previously-created catalog cache loader from the specified
diff --git a/src/mongo/s/commands/cluster_commands_helpers.cpp b/src/mongo/s/commands/cluster_commands_helpers.cpp
index ca89dbf667f..b4f38d1ebe7 100644
--- a/src/mongo/s/commands/cluster_commands_helpers.cpp
+++ b/src/mongo/s/commands/cluster_commands_helpers.cpp
@@ -407,13 +407,13 @@ bool appendEmptyResultSet(BSONObjBuilder& result, Status status, const std::stri
     return Command::appendCommandStatus(result, status);
 }
 
-std::vector<NamespaceString> getAllShardedCollectionsForDb(
-    OperationContext* opCtx, StringData dbName, const repl::ReadConcernLevel& readConcern) {
+std::vector<NamespaceString> getAllShardedCollectionsForDb(OperationContext* opCtx,
+                                                           StringData dbName) {
     const auto dbNameStr = dbName.toString();
 
     std::vector<CollectionType> collectionsOnConfig;
     uassertStatusOK(Grid::get(opCtx)->catalogClient()->getCollections(
-        opCtx, &dbNameStr, &collectionsOnConfig, nullptr, readConcern));
+        opCtx, &dbNameStr, &collectionsOnConfig, nullptr));
 
     std::vector<NamespaceString> collectionsToReturn;
     for (const auto& coll : collectionsOnConfig) {
diff --git a/src/mongo/s/commands/cluster_commands_helpers.h b/src/mongo/s/commands/cluster_commands_helpers.h
index ca23d3a285c..adb41976b4e 100644
--- a/src/mongo/s/commands/cluster_commands_helpers.h
+++ b/src/mongo/s/commands/cluster_commands_helpers.h
@@ -136,10 +136,8 @@ bool appendEmptyResultSet(BSONObjBuilder& result, Status status, const std::stri
 *
 * Throws exception on errors.
 */
-std::vector<NamespaceString> getAllShardedCollectionsForDb(
-    OperationContext* opCtx,
-    StringData dbName,
-    const repl::ReadConcernLevel& readConcern = repl::ReadConcernLevel::kMajorityReadConcern);
+std::vector<NamespaceString> getAllShardedCollectionsForDb(OperationContext* opCtx,
+                                                           StringData dbName);
 
 /**
 * Abstracts the common pattern of refreshing a collection and checking if it is sharded used across
diff --git a/src/mongo/s/config_server_catalog_cache_loader.cpp b/src/mongo/s/config_server_catalog_cache_loader.cpp
index fa2c10f21fc..c88f90f79a6 100644
--- a/src/mongo/s/config_server_catalog_cache_loader.cpp
+++ b/src/mongo/s/config_server_catalog_cache_loader.cpp
@@ -87,16 +87,13 @@ QueryAndSort createConfigDiffQuery(const NamespaceString& nss, ChunkVersion coll
 /**
 * Blocking method, which returns the chunks which changed since the specified version.
 */
-CollectionAndChangedChunks getChangedChunks(
-    OperationContext* opCtx,
-    const NamespaceString& nss,
-    ChunkVersion sinceVersion,
-    const repl::ReadConcernLevel& readConcern = repl::ReadConcernLevel::kMajorityReadConcern) {
+CollectionAndChangedChunks getChangedChunks(OperationContext* opCtx,
+                                            const NamespaceString& nss,
+                                            ChunkVersion sinceVersion) {
     const auto catalogClient = Grid::get(opCtx)->catalogClient();
 
     // Decide whether to do a full or partial load based on the state of the collection
-    const auto coll =
-        uassertStatusOK(catalogClient->getCollection(opCtx, nss.ns(), readConcern)).value;
+    const auto coll = uassertStatusOK(catalogClient->getCollection(opCtx, nss.ns())).value;
 
     uassert(ErrorCodes::NamespaceNotFound,
             str::stream() << "Collection " << nss.ns() << " is dropped.",
            !coll.getDropped());
@@ -171,26 +168,24 @@ Status ConfigServerCatalogCacheLoader::waitForCollectionVersion(OperationContext
 std::shared_ptr<Notification<void>> ConfigServerCatalogCacheLoader::getChunksSince(
     const NamespaceString& nss,
     ChunkVersion version,
-    stdx::function<void(OperationContext*, StatusWith<CollectionAndChangedChunks>)> callbackFn,
-    const repl::ReadConcernLevel& readConcern) {
+    stdx::function<void(OperationContext*, StatusWith<CollectionAndChangedChunks>)> callbackFn) {
     auto notify = std::make_shared<Notification<void>>();
 
-    uassertStatusOK(
-        _threadPool.schedule([ this, nss, version, notify, callbackFn, readConcern ]() noexcept {
-            auto opCtx = Client::getCurrent()->makeOperationContext();
-
-            auto swCollAndChunks = [&]() -> StatusWith<CollectionAndChangedChunks> {
-                try {
-                    return getChangedChunks(opCtx.get(), nss, version, readConcern);
-                } catch (const DBException& ex) {
-                    return ex.toStatus();
-                }
-            }();
-
-            callbackFn(opCtx.get(), std::move(swCollAndChunks));
-            notify->set();
-        }));
+    uassertStatusOK(_threadPool.schedule([ this, nss, version, notify, callbackFn ]() noexcept {
+        auto opCtx = Client::getCurrent()->makeOperationContext();
+
+        auto swCollAndChunks = [&]() -> StatusWith<CollectionAndChangedChunks> {
+            try {
+                return getChangedChunks(opCtx.get(), nss, version);
+            } catch (const DBException& ex) {
+                return ex.toStatus();
+            }
+        }();
+
+        callbackFn(opCtx.get(), std::move(swCollAndChunks));
+        notify->set();
+    }));
 
     return notify;
 }
diff --git a/src/mongo/s/config_server_catalog_cache_loader.h b/src/mongo/s/config_server_catalog_cache_loader.h
index 9c584683c6b..1a2451628da 100644
--- a/src/mongo/s/config_server_catalog_cache_loader.h
+++ b/src/mongo/s/config_server_catalog_cache_loader.h
@@ -54,9 +54,8 @@ public:
     std::shared_ptr<Notification<void>> getChunksSince(
         const NamespaceString& nss,
         ChunkVersion version,
-        stdx::function<void(OperationContext*, StatusWith<CollectionAndChangedChunks>)> callbackFn,
-        const repl::ReadConcernLevel& readConcern =
-            repl::ReadConcernLevel::kMajorityReadConcern) override;
+        stdx::function<void(OperationContext*, StatusWith<CollectionAndChangedChunks>)> callbackFn)
+        override;
 
 private:
     // Thread pool to be used to perform metadata load