path: root/src/mongo/s/catalog_cache.cpp
author     Esha Maharishi <esha.maharishi@mongodb.com>    2017-08-08 09:07:08 -0400
committer  Esha Maharishi <esha.maharishi@mongodb.com>    2017-08-08 15:30:56 -0400
commit     a70b206b4d056de139e649e17d7c1ca3a5c79a52 (patch)
tree       de009d0533bf66824190edc4c39977bff4199b70 /src/mongo/s/catalog_cache.cpp
parent     7bc94c5f02e9d32b7faca61013f8b70b92c1437f (diff)
download   mongo-a70b206b4d056de139e649e17d7c1ca3a5c79a52.tar.gz
Revert "SERVER-30219 Make metadata commands use kLocalReadConcern"
This reverts commit e101217eabfaa8d10c1c6cce969fa773ead966f2.
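Context for the hunks below: SERVER-30219 had threaded an explicit repl::ReadConcernLevel through the CatalogCache entry points, and this revert removes that parameter again. As a rough illustration only (not part of this commit; the function name exampleLookup and the surrounding code are hypothetical, and the Grid accessor is assumed from mongos code of this era), a caller changes along these lines:

    // Sketch of a mongos-side call site, assuming the usual headers of this era.
    #include "mongo/s/catalog_cache.h"
    #include "mongo/s/grid.h"

    StatusWith<CachedCollectionRoutingInfo> exampleLookup(OperationContext* opCtx,
                                                          const NamespaceString& nss) {
        auto catalogCache = Grid::get(opCtx)->catalogCache();

        // Before this revert (SERVER-30219), the read concern was passed explicitly:
        //   catalogCache->getCollectionRoutingInfo(
        //       opCtx, nss, repl::ReadConcernLevel::kLocalReadConcern);

        // After this revert, the two-argument form restored by the diff below is used:
        return catalogCache->getCollectionRoutingInfo(opCtx, nss);
    }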
Diffstat (limited to 'src/mongo/s/catalog_cache.cpp')
-rw-r--r--    src/mongo/s/catalog_cache.cpp    31
1 file changed, 13 insertions, 18 deletions
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index 26f95a0f635..1cba448d443 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -125,13 +125,11 @@ StatusWith<CachedDatabaseInfo> CatalogCache::getDatabase(OperationContext* opCtx
}
StatusWith<CachedCollectionRoutingInfo> CatalogCache::getCollectionRoutingInfo(
- OperationContext* opCtx,
- const NamespaceString& nss,
- const repl::ReadConcernLevel& readConcern) {
+ OperationContext* opCtx, const NamespaceString& nss) {
while (true) {
std::shared_ptr<DatabaseInfoEntry> dbEntry;
try {
- dbEntry = _getDatabase(opCtx, nss.db(), readConcern);
+ dbEntry = _getDatabase(opCtx, nss.db());
} catch (const DBException& ex) {
return ex.toStatus();
}
@@ -163,7 +161,7 @@ StatusWith<CachedCollectionRoutingInfo> CatalogCache::getCollectionRoutingInfo(
refreshNotification = (collEntry.refreshCompletionNotification =
std::make_shared<Notification<Status>>());
_scheduleCollectionRefresh_inlock(
- dbEntry, std::move(collEntry.routingInfo), nss, 1, readConcern);
+ dbEntry, std::move(collEntry.routingInfo), nss, 1);
}
// Wait on the notification outside of the mutex
@@ -278,8 +276,8 @@ void CatalogCache::purgeAllDatabases() {
_databases.clear();
}
-std::shared_ptr<CatalogCache::DatabaseInfoEntry> CatalogCache::_getDatabase(
- OperationContext* opCtx, StringData dbName, const repl::ReadConcernLevel& readConcern) {
+std::shared_ptr<CatalogCache::DatabaseInfoEntry> CatalogCache::_getDatabase(OperationContext* opCtx,
+ StringData dbName) {
stdx::lock_guard<stdx::mutex> lg(_mutex);
auto it = _databases.find(dbName);
@@ -292,15 +290,14 @@ std::shared_ptr<CatalogCache::DatabaseInfoEntry> CatalogCache::_getDatabase(
const auto dbNameCopy = dbName.toString();
// Load the database entry
- const auto opTimeWithDb =
- uassertStatusOK(catalogClient->getDatabase(opCtx, dbNameCopy, readConcern));
+ const auto opTimeWithDb = uassertStatusOK(catalogClient->getDatabase(opCtx, dbNameCopy));
const auto& dbDesc = opTimeWithDb.value;
// Load the sharded collections entries
std::vector<CollectionType> collections;
repl::OpTime collLoadConfigOptime;
- uassertStatusOK(catalogClient->getCollections(
- opCtx, &dbNameCopy, &collections, &collLoadConfigOptime, readConcern));
+ uassertStatusOK(
+ catalogClient->getCollections(opCtx, &dbNameCopy, &collections, &collLoadConfigOptime));
StringMap<CollectionRoutingInfoEntry> collectionEntries;
for (const auto& coll : collections) {
@@ -319,15 +316,14 @@ void CatalogCache::_scheduleCollectionRefresh_inlock(
std::shared_ptr<DatabaseInfoEntry> dbEntry,
std::shared_ptr<ChunkManager> existingRoutingInfo,
const NamespaceString& nss,
- int refreshAttempt,
- const repl::ReadConcernLevel& readConcern) {
+ int refreshAttempt) {
Timer t;
const ChunkVersion startingCollectionVersion =
(existingRoutingInfo ? existingRoutingInfo->getVersion() : ChunkVersion::UNSHARDED());
const auto refreshFailed_inlock =
- [ this, t, dbEntry, nss, refreshAttempt, readConcern ](const Status& status) noexcept {
+ [ this, t, dbEntry, nss, refreshAttempt ](const Status& status) noexcept {
log() << "Refresh for collection " << nss << " took " << t.millis() << " ms and failed"
<< causedBy(redact(status));
@@ -340,8 +336,7 @@ void CatalogCache::_scheduleCollectionRefresh_inlock(
// refresh again
if (status == ErrorCodes::ConflictingOperationInProgress &&
refreshAttempt < kMaxInconsistentRoutingInfoRefreshAttempts) {
- _scheduleCollectionRefresh_inlock(
- dbEntry, nullptr, nss, refreshAttempt + 1, readConcern);
+ _scheduleCollectionRefresh_inlock(dbEntry, nullptr, nss, refreshAttempt + 1);
} else {
// Leave needsRefresh to true so that any subsequent get attempts will kick off
// another round of refresh
@@ -351,7 +346,7 @@ void CatalogCache::_scheduleCollectionRefresh_inlock(
};
const auto refreshCallback =
- [ this, t, dbEntry, nss, existingRoutingInfo, refreshFailed_inlock, readConcern ](
+ [ this, t, dbEntry, nss, existingRoutingInfo, refreshFailed_inlock ](
OperationContext * opCtx,
StatusWith<CatalogCacheLoader::CollectionAndChangedChunks> swCollAndChunks) noexcept {
std::shared_ptr<ChunkManager> newRoutingInfo;
@@ -391,7 +386,7 @@ void CatalogCache::_scheduleCollectionRefresh_inlock(
<< startingCollectionVersion;
try {
- _cacheLoader.getChunksSince(nss, startingCollectionVersion, refreshCallback, readConcern);
+ _cacheLoader.getChunksSince(nss, startingCollectionVersion, refreshCallback);
} catch (const DBException& ex) {
const auto status = ex.toStatus();