summary | refs | log | tree | commit | diff
path: root/src/mongo
diff options
context:
space:
mode:
author    Cheahuychou Mao <cheahuychou.mao@mongodb.com>  2018-07-19 17:27:06 -0400
committer Kelsey Schubert <kelsey@mongodb.com>  2018-09-06 17:16:49 -0400
commit    fa32ebcfd379c3baa5dbe7c1273a636a66056033 (patch)
tree      c8e7601e131604c0e40853f1230337d5afcf8d26 /src/mongo
parent    9d0653fa21498631990abede00914c0ed5d1a62b (diff)
download  mongo-fa32ebcfd379c3baa5dbe7c1273a636a66056033.tar.gz
SERVER-30841 Lower the amount of metadata refresh logging
(cherry picked from commit 8b066e35dd7c5a29e17f2d4b1cab69e984fadebc)
Diffstat (limited to 'src/mongo')
-rw-r--r--src/mongo/db/s/shard_server_catalog_cache_loader.cpp61
-rw-r--r--src/mongo/logger/log_component.cpp5
-rw-r--r--src/mongo/logger/log_component.h1
-rw-r--r--src/mongo/s/catalog_cache.cpp42
-rw-r--r--src/mongo/s/catalog_cache_loader.h3
5 files changed, 68 insertions, 44 deletions
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index 834645bbece..553380d0502 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -645,8 +645,9 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
return;
}
- LOG(1) << "Cache loader remotely refreshed for collection " << nss << " from version "
- << maxLoaderVersion << " and no metadata was found.";
+ LOG_CATALOG_REFRESH(1) << "Cache loader remotely refreshed for collection " << nss
+ << " from version " << maxLoaderVersion
+ << " and no metadata was found.";
} else if (swCollectionAndChangedChunks.isOK()) {
auto& collAndChunks = swCollectionAndChangedChunks.getValue();
@@ -676,10 +677,10 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
}
}
- LOG(1) << "Cache loader remotely refreshed for collection " << nss
- << " from collection version " << maxLoaderVersion
- << " and found collection version "
- << collAndChunks.changedChunks.back().getVersion();
+ LOG_CATALOG_REFRESH(1) << "Cache loader remotely refreshed for collection " << nss
+ << " from collection version " << maxLoaderVersion
+ << " and found collection version "
+ << collAndChunks.changedChunks.back().getVersion();
// Metadata was found remotely -- otherwise would have received NamespaceNotFound
// rather than Status::OK(). Return metadata for CatalogCache that's GTE
@@ -731,8 +732,8 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetDatabase(
return;
}
- LOG(1) << "Cache loader remotely refreshed for database " << name
- << " and found the database has been dropped.";
+ LOG_CATALOG_REFRESH(1) << "Cache loader remotely refreshed for database " << name
+ << " and found the database has been dropped.";
} else if (swDatabaseType.isOK()) {
Status scheduleStatus = _ensureMajorityPrimaryAndScheduleDbTask(
@@ -742,8 +743,8 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetDatabase(
return;
}
- LOG(1) << "Cache loader remotely refreshed for database " << name << " and found "
- << swDatabaseType.getValue().toBSON();
+ LOG_CATALOG_REFRESH(1) << "Cache loader remotely refreshed for database " << name
+ << " and found " << swDatabaseType.getValue().toBSON();
}
// Complete the callbackFn work.
@@ -778,18 +779,19 @@ StatusWith<CollectionAndChangedChunks> ShardServerCatalogCacheLoader::_getLoader
persisted = std::move(swPersisted.getValue());
}
- LOG(1) << "Cache loader found "
- << (enqueued.changedChunks.empty()
- ? (tasksAreEnqueued ? "a drop enqueued" : "no enqueued metadata")
- : ("enqueued metadata from " +
- enqueued.changedChunks.front().getVersion().toString() + " to " +
- enqueued.changedChunks.back().getVersion().toString()))
- << " and " << (persisted.changedChunks.empty()
- ? "no persisted metadata"
- : ("persisted metadata from " +
- persisted.changedChunks.front().getVersion().toString() + " to " +
- persisted.changedChunks.back().getVersion().toString()))
- << ", GTE cache version " << catalogCacheSinceVersion;
+ LOG_CATALOG_REFRESH(1)
+ << "Cache loader found "
+ << (enqueued.changedChunks.empty()
+ ? (tasksAreEnqueued ? "a drop enqueued" : "no enqueued metadata")
+ : ("enqueued metadata from " +
+ enqueued.changedChunks.front().getVersion().toString() + " to " +
+ enqueued.changedChunks.back().getVersion().toString()))
+ << " and " << (persisted.changedChunks.empty()
+ ? "no persisted metadata"
+ : ("persisted metadata from " +
+ persisted.changedChunks.front().getVersion().toString() + " to " +
+ persisted.changedChunks.back().getVersion().toString()))
+ << ", GTE cache version " << catalogCacheSinceVersion;
if (!tasksAreEnqueued) {
// There are no tasks in the queue. Return the persisted metadata.
@@ -1023,8 +1025,9 @@ void ShardServerCatalogCacheLoader::_updatePersistedCollAndChunksMetadata(
<< task.maxQueryVersion.toString()
<< "'. Will be retried.");
- LOG(1) << "Successfully updated persisted chunk metadata for collection '" << nss << "' from '"
- << task.minQueryVersion << "' to collection version '" << task.maxQueryVersion << "'.";
+ LOG_CATALOG_REFRESH(1) << "Successfully updated persisted chunk metadata for collection '"
+ << nss << "' from '" << task.minQueryVersion
+ << "' to collection version '" << task.maxQueryVersion << "'.";
}
void ShardServerCatalogCacheLoader::_updatePersistedDbMetadata(OperationContext* opCtx,
@@ -1056,7 +1059,8 @@ void ShardServerCatalogCacheLoader::_updatePersistedDbMetadata(OperationContext*
<< dbName.toString()
<< "'. Will be retried.");
- LOG(1) << "Successfully updated persisted metadata for db " << dbName.toString();
+ LOG_CATALOG_REFRESH(1) << "Successfully updated persisted metadata for db "
+ << dbName.toString();
}
CollectionAndChangedChunks
@@ -1091,9 +1095,10 @@ ShardServerCatalogCacheLoader::_getCompletePersistedMetadataForSecondarySinceVer
return collAndChangedChunks;
}
- LOG(1) << "Cache loader read meatadata while updates were being applied: this metadata may"
- << " be incomplete. Retrying. Refresh state before read: " << beginRefreshState
- << ". Current refresh state: '" << endRefreshState << "'.";
+ LOG_CATALOG_REFRESH(1)
+ << "Cache loader read meatadata while updates were being applied: this metadata may"
+ << " be incomplete. Retrying. Refresh state before read: " << beginRefreshState
+ << ". Current refresh state: '" << endRefreshState << "'.";
}
}
diff --git a/src/mongo/logger/log_component.cpp b/src/mongo/logger/log_component.cpp
index 31ca2c68f03..041ef7b1740 100644
--- a/src/mongo/logger/log_component.cpp
+++ b/src/mongo/logger/log_component.cpp
@@ -77,6 +77,7 @@ LogComponent LogComponent::parent() const {
DECLARE_LOG_COMPONENT_PARENT(kBridge, kNetwork);
DECLARE_LOG_COMPONENT_PARENT(kReplicationHeartbeats, kReplication);
DECLARE_LOG_COMPONENT_PARENT(kReplicationRollback, kReplication);
+ DECLARE_LOG_COMPONENT_PARENT(kShardingCatalogRefresh, kSharding);
DECLARE_LOG_COMPONENT_PARENT(kStorageRecovery, kStorage);
case kNumLogComponents:
return kNumLogComponents;
@@ -114,6 +115,8 @@ StringData LogComponent::toStringData() const {
return "rollback"_sd;
case kSharding:
return "sharding"_sd;
+ case kShardingCatalogRefresh:
+ return "shardingCatalogRefresh"_sd;
case kStorage:
return "storage"_sd;
case kStorageRecovery:
@@ -194,6 +197,8 @@ StringData LogComponent::getNameForLog() const {
return "ROLLBACK"_sd;
case kSharding:
return "SHARDING"_sd;
+ case kShardingCatalogRefresh:
+ return "SH_REFR "_sd;
case kStorage:
return "STORAGE "_sd;
case kStorageRecovery:
diff --git a/src/mongo/logger/log_component.h b/src/mongo/logger/log_component.h
index af6f0a03f25..b6abcefd720 100644
--- a/src/mongo/logger/log_component.h
+++ b/src/mongo/logger/log_component.h
@@ -56,6 +56,7 @@ public:
kReplicationHeartbeats,
kReplicationRollback,
kSharding,
+ kShardingCatalogRefresh,
kStorage,
kStorageRecovery,
kJournal,
diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp
index d05e6b017fb..30ec5bf8a7c 100644
--- a/src/mongo/s/catalog_cache.cpp
+++ b/src/mongo/s/catalog_cache.cpp
@@ -432,20 +432,20 @@ void CatalogCache::_scheduleDatabaseRefresh(WithLock,
const std::string& dbName,
std::shared_ptr<DatabaseInfoEntry> dbEntry) {
- log() << "Refreshing cached database entry for " << dbName
- << "; current cached database info is "
- << (dbEntry->dbt ? dbEntry->dbt->toBSON() : BSONObj());
+ LOG_CATALOG_REFRESH(1) << "Refreshing cached database entry for " << dbName
+ << "; current cached database info is "
+ << (dbEntry->dbt ? dbEntry->dbt->toBSON() : BSONObj());
const auto onRefreshCompleted =
[ this, t = Timer(), dbName ](const StatusWith<DatabaseType>& swDbt) {
// TODO (SERVER-34164): Track and increment stats for database refreshes.
if (!swDbt.isOK()) {
- log() << "Refresh for database " << dbName << " took " << t.millis() << " ms and failed"
- << causedBy(redact(swDbt.getStatus()));
+ LOG_CATALOG_REFRESH(0) << "Refresh for database " << dbName << " took " << t.millis()
+ << " ms and failed" << causedBy(redact(swDbt.getStatus()));
return;
}
- log() << "Refresh for database " << dbName << " took " << t.millis() << " ms and found "
- << swDbt.getValue().toBSON();
+ LOG_CATALOG_REFRESH(0) << "Refresh for database " << dbName << " took " << t.millis()
+ << " ms and found " << swDbt.getValue().toBSON();
};
const auto onRefreshFailed =
@@ -511,7 +511,7 @@ void CatalogCache::_scheduleCollectionRefresh(WithLock lk,
}
// Invoked when one iteration of getChunksSince has completed, whether with success or error
- const auto onRefreshCompleted = [ this, t = Timer(), nss, isIncremental ](
+ const auto onRefreshCompleted = [ this, t = Timer(), nss, isIncremental, existingRoutingInfo ](
const Status& status, RoutingTableHistory* routingInfoAfterRefresh) {
if (isIncremental) {
_stats.numActiveIncrementalRefreshes.subtractAndFetch(1);
@@ -522,14 +522,24 @@ void CatalogCache::_scheduleCollectionRefresh(WithLock lk,
if (!status.isOK()) {
_stats.countFailedRefreshes.addAndFetch(1);
- log() << "Refresh for collection " << nss << " took " << t.millis() << " ms and failed"
- << causedBy(redact(status));
+ LOG_CATALOG_REFRESH(0) << "Refresh for collection " << nss << " took " << t.millis()
+ << " ms and failed" << causedBy(redact(status));
} else if (routingInfoAfterRefresh) {
- log() << "Refresh for collection " << nss << " took " << t.millis()
- << " ms and found version " << routingInfoAfterRefresh->getVersion();
+ const int logLevel = (!existingRoutingInfo || (existingRoutingInfo &&
+ routingInfoAfterRefresh->getVersion() !=
+ existingRoutingInfo->getVersion()))
+ ? 0
+ : 1;
+ LOG_CATALOG_REFRESH(logLevel)
+ << "Refresh for collection " << nss.toString()
+ << (existingRoutingInfo
+ ? (" from version " + existingRoutingInfo->getVersion().toString())
+ : "")
+ << " to version " << routingInfoAfterRefresh->getVersion().toString() << " took "
+ << t.millis() << " ms";
} else {
- log() << "Refresh for collection " << nss << " took " << t.millis()
- << " ms and found the collection is not sharded";
+ LOG_CATALOG_REFRESH(0) << "Refresh for collection " << nss << " took " << t.millis()
+ << " ms and found the collection is not sharded";
}
};
@@ -589,8 +599,8 @@ void CatalogCache::_scheduleCollectionRefresh(WithLock lk,
const ChunkVersion startingCollectionVersion =
(existingRoutingInfo ? existingRoutingInfo->getVersion() : ChunkVersion::UNSHARDED());
- log() << "Refreshing chunks for collection " << nss << " based on version "
- << startingCollectionVersion;
+ LOG_CATALOG_REFRESH(1) << "Refreshing chunks for collection " << nss << " based on version "
+ << startingCollectionVersion;
try {
_cacheLoader.getChunksSince(nss, startingCollectionVersion, refreshCallback);
diff --git a/src/mongo/s/catalog_cache_loader.h b/src/mongo/s/catalog_cache_loader.h
index 90b2716fdfb..d43b2fb698e 100644
--- a/src/mongo/s/catalog_cache_loader.h
+++ b/src/mongo/s/catalog_cache_loader.h
@@ -41,6 +41,9 @@
#include "mongo/util/concurrency/notification.h"
#include "mongo/util/uuid.h"
+#define LOG_CATALOG_REFRESH(level) \
+ MONGO_LOG_COMPONENT(level, ::mongo::logger::LogComponent::kShardingCatalogRefresh)
+
namespace mongo {
class NamespaceString;