author     Jack Mulrow <jack.mulrow@mongodb.com>             2020-03-18 17:40:13 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2020-03-23 22:34:38 +0000
commit     2debd8d3f087a50c3c7ec6d3d99ba6b056a24eab
tree       50e10550a5ad53b5117e68c2260588161dd1fb10
parent     65f447b27e9b3db0315dd52d97d9e26fa462916c
SERVER-46799 Update sharding log lines to adhere to LOGV2 style guide
 src/mongo/db/s/shard_filtering_metadata_refresh.cpp  |  60
 src/mongo/db/s/shard_metadata_util.cpp               |  20
 src/mongo/db/s/shard_server_catalog_cache_loader.cpp | 125
 src/mongo/db/s/sharded_connection_info.cpp           |   2
 src/mongo/db/s/sharding_initialization_mongod.cpp    |  57
 src/mongo/db/s/sharding_logging.cpp                  |  24
 src/mongo/db/s/sharding_state.cpp                    |   5
 src/mongo/db/s/sharding_state_recovery.cpp           |  34
 src/mongo/db/s/shardsvr_shard_collection.cpp         |  19
 9 files changed, 202 insertions(+), 144 deletions(-)
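Every hunk below applies the same LOGV2 style-guide conversion, so it is worth stating once up front. The following is a minimal before/after sketch of the pattern, modeled on the log ID 22062 call site from shard_filtering_metadata_refresh.cpp; it assumes the surrounding MongoDB tree (the LOGV2 macro from mongo/logv2/log.h, and in-scope nss and ex variables), so it illustrates the style rather than compiling on its own.

// Before: the only message is the format string, and the attribute names are
// derived mechanically from the C++ expressions that produce the values.
LOGV2(22062,
      "Failed to refresh metadata for collection {nss}{causedBy_ex}",
      "nss"_attr = nss,
      "causedBy_ex"_attr = causedBy(redact(ex)));

// After: the format string keeps placeholders for human-readable output, a
// second stable message string is added for structured-log consumers, and the
// attributes get descriptive, expression-independent names.
LOGV2(22062,
      "Failed to refresh metadata for {namespace} due to {error}",
      "Failed to refresh metadata for collection",
      "namespace"_attr = nss,
      "error"_attr = redact(ex));

As the per-file hunks show, the conversion also sentence-cases messages, drops trailing periods and manual quoting, and replaces causedBy(redact(ex)) with a plain "error" attribute.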
diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
index a97339898b6..4efc20e58a8 100644
--- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
+++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp
@@ -68,8 +68,10 @@ void onShardVersionMismatch(OperationContext* opCtx,
     LOGV2_DEBUG(22061,
                 2,
-                "Metadata refresh requested for {nss_ns} at shard version {shardVersionReceived}",
-                "nss_ns"_attr = nss.ns(),
+                "Metadata refresh requested for {namespace} at shard version "
+                "{shardVersionReceived}",
+                "Metadata refresh requested for collection",
+                "namespace"_attr = nss.ns(),
                 "shardVersionReceived"_attr = shardVersionReceived);

     ShardingStatistics::get(opCtx).countStaleConfigErrors.addAndFetch(1);
@@ -196,9 +198,10 @@ Status onShardVersionMismatchNoExcept(OperationContext* opCtx,
         return Status::OK();
     } catch (const DBException& ex) {
         LOGV2(22062,
-              "Failed to refresh metadata for collection {nss}{causedBy_ex}",
-              "nss"_attr = nss,
-              "causedBy_ex"_attr = causedBy(redact(ex)));
+              "Failed to refresh metadata for {namespace} due to {error}",
+              "Failed to refresh metadata for collection",
+              "namespace"_attr = nss,
+              "error"_attr = redact(ex));
         return ex.toStatus();
     }
 }
@@ -250,13 +253,16 @@ ChunkVersion forceShardFilteringMetadataRefresh(OperationContext* opCtx,
         if (metadata->isSharded() &&
             metadata->getCollVersion().epoch() == cm->getVersion().epoch() &&
             metadata->getCollVersion() >= cm->getVersion()) {
-            LOGV2_DEBUG(22063,
-                        1,
-                        "Skipping refresh of metadata for {nss} {metadata_getCollVersion} with "
-                        "an older {cm_getVersion}",
-                        "nss"_attr = nss,
-                        "metadata_getCollVersion"_attr = metadata->getCollVersion(),
-                        "cm_getVersion"_attr = cm->getVersion());
+            LOGV2_DEBUG(
+                22063,
+                1,
+                "Skipping refresh of metadata for {namespace} {latestCollectionVersion} with "
+                "an older {refreshedCollectionVersion}",
+                "Skipping metadata refresh because collection already has at least as recent "
+                "metadata",
+                "namespace"_attr = nss,
+                "latestCollectionVersion"_attr = metadata->getCollVersion(),
+                "refreshedCollectionVersion"_attr = cm->getVersion());
             return metadata->getShardVersion();
         }
     }
@@ -278,13 +284,16 @@ ChunkVersion forceShardFilteringMetadataRefresh(OperationContext* opCtx,
         if (metadata->isSharded() &&
             metadata->getCollVersion().epoch() == cm->getVersion().epoch() &&
             metadata->getCollVersion() >= cm->getVersion()) {
-            LOGV2_DEBUG(22064,
-                        1,
-                        "Skipping refresh of metadata for {nss} {metadata_getCollVersion} with "
-                        "an older {cm_getVersion}",
-                        "nss"_attr = nss,
-                        "metadata_getCollVersion"_attr = metadata->getCollVersion(),
-                        "cm_getVersion"_attr = cm->getVersion());
+            LOGV2_DEBUG(
+                22064,
+                1,
+                "Skipping refresh of metadata for {namespace} {latestCollectionVersion} with "
+                "an older {refreshedCollectionVersion}",
+                "Skipping metadata refresh because collection already has at least as recent "
+                "metadata",
+                "namespace"_attr = nss,
+                "latestCollectionVersion"_attr = metadata->getCollVersion(),
+                "refreshedCollectionVersion"_attr = cm->getVersion());
             return metadata->getShardVersion();
         }
     }
@@ -307,9 +316,10 @@ Status onDbVersionMismatchNoExcept(
         return Status::OK();
     } catch (const DBException& ex) {
         LOGV2(22065,
-              "Failed to refresh databaseVersion for database {dbName}{causedBy_ex}",
-              "dbName"_attr = dbName,
-              "causedBy_ex"_attr = causedBy(redact(ex)));
+              "Failed to refresh databaseVersion for database {db} {error}",
+              "Failed to refresh databaseVersion",
+              "db"_attr = dbName,
+              "error"_attr = redact(ex));
         return ex.toStatus();
     }
 }
@@ -359,10 +369,12 @@ void forceDatabaseRefresh(OperationContext* opCtx, const StringData dbName) {
             cachedDbVersion->getLastMod() >= refreshedDbVersion.getLastMod()) {
             LOGV2_DEBUG(22066,
                         2,
-                        "Skipping setting cached databaseVersion for {dbName} to refreshed version "
+                        "Skipping setting cached databaseVersion for {db} to refreshed version "
                         "{refreshedDbVersion} because current cached databaseVersion is already "
                         "{cachedDbVersion}",
-                        "dbName"_attr = dbName,
+                        "Skipping setting cached databaseVersion to refreshed version "
+                        "because current cached databaseVersion is more recent",
+                        "db"_attr = dbName,
                         "refreshedDbVersion"_attr = refreshedDbVersion.toBSON(),
                         "cachedDbVersion"_attr = cachedDbVersion->toBSON());
             return;
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index 7cbc8d6e59d..652a48cdd38 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -425,10 +425,13 @@ Status dropChunksAndDeleteCollectionsEntry(OperationContext* opCtx, const Namesp
         }
     }

-    LOGV2_DEBUG(22090,
-                1,
-                "Successfully cleared persisted chunk metadata for collection '{nss}'.",
-                "nss"_attr = nss);
+    LOGV2_DEBUG(
+        22090,
+        1,
+        "Successfully cleared persisted chunk metadata and collection entry for collection "
+        "{namespace}",
+        "Successfully cleared persisted chunk metadata and collection entry",
+        "namespace"_attr = nss);
     return Status::OK();
 } catch (const DBException& ex) {
     return ex.toStatus();
@@ -449,8 +452,8 @@ void dropChunks(OperationContext* opCtx, const NamespaceString& nss) {

     LOGV2_DEBUG(22091,
                 1,
-                "Successfully cleared persisted chunk metadata for collection '{nss}'.",
-                "nss"_attr = nss);
+                "Successfully cleared persisted chunk metadata for collection",
+                "namespace"_attr = nss);
 }

 Status deleteDatabasesEntry(OperationContext* opCtx, StringData dbName) {
@@ -472,8 +475,9 @@ Status deleteDatabasesEntry(OperationContext* opCtx, StringData dbName) {

     LOGV2_DEBUG(22092,
                 1,
-                "Successfully cleared persisted metadata for db '{dbName}'.",
-                "dbName"_attr = dbName.toString());
+                "Successfully cleared persisted metadata for db {db}",
+                "Successfully cleared persisted metadata for db",
+                "db"_attr = dbName);
     return Status::OK();
 } catch (const DBException& ex) {
     return ex.toStatus();
diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
index 0ff47d62080..428b98a5aeb 100644
--- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
+++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp
@@ -675,10 +675,11 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
         LOGV2_FOR_CATALOG_REFRESH(
             24107,
             1,
-            "Cache loader remotely refreshed for collection {nss} from version "
-            "{maxLoaderVersion} and no metadata was found.",
-            "nss"_attr = nss,
-            "maxLoaderVersion"_attr = maxLoaderVersion);
+            "Cache loader remotely refreshed for collection {namespace} from version "
+            "{oldCollectionVersion} and no metadata was found",
+            "Cache loader remotely refreshed for collection and no metadata was found",
+            "namespace"_attr = nss,
+            "oldCollectionVersion"_attr = maxLoaderVersion);
         return swCollectionAndChangedChunks;
     }
@@ -709,13 +710,12 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince(
     LOGV2_FOR_CATALOG_REFRESH(
         24108,
         1,
-        "Cache loader remotely refreshed for collection {nss} from collection version "
-        "{maxLoaderVersion} and found collection version "
-        "{collAndChunks_changedChunks_back_getVersion}",
-        "nss"_attr = nss,
-        "maxLoaderVersion"_attr = maxLoaderVersion,
-        "collAndChunks_changedChunks_back_getVersion"_attr =
-            collAndChunks.changedChunks.back().getVersion());
+        "Cache loader remotely refreshed for collection {namespace} from collection version "
+        "{oldCollectionVersion} and found collection version {refreshedCollectionVersion}",
+        "Cache loader remotely refreshed for collection",
+        "namespace"_attr = nss,
+        "oldCollectionVersion"_attr = maxLoaderVersion,
+        "refreshedCollectionVersion"_attr = collAndChunks.changedChunks.back().getVersion());

     // Metadata was found remotely
     // -- otherwise we would have received NamespaceNotFound rather than Status::OK().
@@ -785,9 +785,11 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetDatabase(
         LOGV2_FOR_CATALOG_REFRESH(
             24109,
             1,
-            "Cache loader remotely refreshed for database {name} and found the database has "
-            "been dropped.",
-            "name"_attr = name);
+            "Cache loader remotely refreshed for database {db} and found the database has "
+            "been dropped",
+            "Cache loader remotely refreshed for database and found the database has been "
+            "dropped",
+            "db"_attr = name);
         return swDatabaseType;
     }
@@ -799,10 +801,11 @@ void ShardServerCatalogCacheLoader::_schedulePrimaryGetDatabase(
     LOGV2_FOR_CATALOG_REFRESH(24110,
                               1,
-                              "Cache loader remotely refreshed for database {name} and found "
-                              "{swDatabaseType_getValue}",
-                              "name"_attr = name,
-                              "swDatabaseType_getValue"_attr =
+                              "Cache loader remotely refreshed for database {db} and found "
+                              "{refreshedDatabaseType}",
+                              "Cache loader remotely refreshed for database",
+                              "db"_attr = name,
+                              "refreshedDatabaseType"_attr =
                                   swDatabaseType.getValue().toBSON());

     return swDatabaseType;
@@ -842,20 +845,22 @@ StatusWith<CollectionAndChangedChunks> ShardServerCatalogCacheLoader::_getLoader
     LOGV2_FOR_CATALOG_REFRESH(
         24111,
         1,
-        "Cache loader found {enqueued} and {persisted}, GTE "
-        "cache version {catalogCacheSinceVersion}",
-        "found"_attr = (enqueued.changedChunks.empty()
-                            ? (tasksAreEnqueued ? "a drop enqueued" : "no enqueued metadata")
-                            : ("enqueued metadata from " +
-                               enqueued.changedChunks.front().getVersion().toString() + " to " +
-                               enqueued.changedChunks.back().getVersion().toString())),
-        "persisted"_attr =
+        "Cache loader found {enqueuedTasksDesc} and {persistedMetadataDesc}, GTE cache version "
+        "{latestCachedVersion}",
+        "Cache loader state since the latest cached version",
+        "enqueuedTasksDesc"_attr =
+            (enqueued.changedChunks.empty()
+                 ? (tasksAreEnqueued ? "a drop enqueued" : "no enqueued metadata")
+                 : ("enqueued metadata from " +
+                    enqueued.changedChunks.front().getVersion().toString() + " to " +
+                    enqueued.changedChunks.back().getVersion().toString())),
+        "persistedMetadataDesc"_attr =
             (persisted.changedChunks.empty()
                  ? "no persisted metadata"
                  : ("persisted metadata from " +
                     persisted.changedChunks.front().getVersion().toString() + " to " +
                     persisted.changedChunks.back().getVersion().toString())),
-        "catalogCacheSinceVersion"_attr = catalogCacheSinceVersion);
+        "latestCachedVersion"_attr = catalogCacheSinceVersion);

     if (!tasksAreEnqueued) {
         // There are no tasks in the queue. Return the persisted metadata.
@@ -982,14 +987,16 @@ void ShardServerCatalogCacheLoader::_runCollAndChunksTasks(const NamespaceString
         taskFinished = true;
     } catch (const ExceptionForCat<ErrorCategory::ShutdownError>&) {
         LOGV2(22094,
-              "Failed to persist chunk metadata update for collection '{nss}' due to shutdown.",
-              "nss"_attr = nss);
+              "Failed to persist chunk metadata update for collection {namespace} due to shutdown",
+              "Failed to persist chunk metadata update for collection due to shutdown",
+              "namespace"_attr = nss);
         inShutdown = true;
     } catch (const DBException& ex) {
         LOGV2(22095,
-              "Failed to persist chunk metadata update for collection '{nss}{causedBy_ex}",
-              "nss"_attr = nss,
-              "causedBy_ex"_attr = causedBy(redact(ex)));
+              "Failed to persist chunk metadata update for collection {namespace} {error}",
+              "Failed to persist chunk metadata update for collection",
+              "namespace"_attr = nss,
+              "error"_attr = redact(ex));
     }

     {
@@ -1020,10 +1027,13 @@ void ShardServerCatalogCacheLoader::_runCollAndChunksTasks(const NamespaceString
     if (ErrorCodes::isCancelationError(status.code())) {
         LOGV2(22096,
               "Cache loader failed to schedule a persisted metadata update task for namespace "
-              "'{nss}' due to '{status}'. Clearing task list so that scheduling will be "
-              "attempted by the next caller to refresh this namespace.",
-              "nss"_attr = nss,
-              "status"_attr = redact(status));
+              "{namespace} due to {error}. Clearing task list so that scheduling will be "
+              "attempted by the next caller to refresh this namespace",
+              "Cache loader failed to schedule a persisted metadata update task. Clearing task "
+              "list so that scheduling will be attempted by the next caller to refresh this "
+              "namespace",
+              "namespace"_attr = nss,
+              "error"_attr = redact(status));

         {
             stdx::lock_guard<Latch> lock(_mutex);
@@ -1047,14 +1057,16 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
         taskFinished = true;
     } catch (const ExceptionForCat<ErrorCategory::ShutdownError>&) {
         LOGV2(22097,
-              "Failed to persist metadata update for db '{dbName}' due to shutdown.",
-              "dbName"_attr = dbName);
+              "Failed to persist metadata update for db {db} due to shutdown",
+              "Failed to persist metadata update for db due to shutdown",
+              "db"_attr = dbName);
         inShutdown = true;
     } catch (const DBException& ex) {
         LOGV2(22098,
-              "Failed to persist chunk metadata update for database {dbName}{causedBy_ex}",
-              "dbName"_attr = dbName,
-              "causedBy_ex"_attr = causedBy(redact(ex)));
+              "Failed to persist chunk metadata update for database {db} {error}",
+              "Failed to persist chunk metadata update for database",
+              "db"_attr = dbName,
+              "error"_attr = redact(ex));
     }

     {
@@ -1085,10 +1097,13 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) {
     if (ErrorCodes::isCancelationError(status.code())) {
         LOGV2(22099,
               "Cache loader failed to schedule a persisted metadata update task for namespace "
-              "'{name}' due to '{status}'. Clearing task list so that scheduling will be "
-              "attempted by the next caller to refresh this namespace.",
-              "name"_attr = name,
-              "status"_attr = redact(status));
+              "{namespace} due to {error}. Clearing task list so that scheduling will be "
+              "attempted by the next caller to refresh this namespace",
+              "Cache loader failed to schedule a persisted metadata update task. Clearing task "
+              "list so that scheduling will be attempted by the next caller to refresh this "
+              "namespace",
+              "namespace"_attr = name,
+              "error"_attr = redact(status));

         {
             stdx::lock_guard<Latch> lock(_mutex);
@@ -1137,11 +1152,12 @@ void ShardServerCatalogCacheLoader::_updatePersistedCollAndChunksMetadata(
     LOGV2_FOR_CATALOG_REFRESH(
         24112,
         1,
-        "Successfully updated persisted chunk metadata for collection '{nss}' from "
-        "'{task_minQueryVersion}' to collection version '{task_maxQueryVersion}'.",
-        "nss"_attr = nss,
-        "task_minQueryVersion"_attr = task.minQueryVersion,
-        "task_maxQueryVersion"_attr = task.maxQueryVersion);
+        "Successfully updated persisted chunk metadata for collection {namespace} from "
+        "{oldCollectionVersion} to collection version {newCollectionVersion}",
+        "Successfully updated persisted chunk metadata for collection",
+        "namespace"_attr = nss,
+        "oldCollectionVersion"_attr = task.minQueryVersion,
+        "newCollectionVersion"_attr = task.maxQueryVersion);
 }

 void ShardServerCatalogCacheLoader::_updatePersistedDbMetadata(OperationContext* opCtx,
@@ -1173,8 +1189,9 @@ void ShardServerCatalogCacheLoader::_updatePersistedDbMetadata(OperationContext*
     LOGV2_FOR_CATALOG_REFRESH(24113,
                               1,
-                              "Successfully updated persisted metadata for db {dbName}",
-                              "dbName"_attr = dbName.toString());
+                              "Successfully updated persisted metadata for db {db}",
+                              "Successfully updated persisted metadata for db",
+                              "db"_attr = dbName.toString());
 }

 CollectionAndChangedChunks
@@ -1214,7 +1231,9 @@ ShardServerCatalogCacheLoader::_getCompletePersistedMetadataForSecondarySinceVer
             1,
             "Cache loader read meatadata while updates were being applied: this metadata may be "
             "incomplete. Retrying. Refresh state before read: {beginRefreshState}. Current refresh "
-            "state: '{endRefreshState}'.",
+            "state: {endRefreshState}",
+            "Cache loader read meatadata while updates were being applied: this metadata may be "
+            "incomplete. Retrying",
             "beginRefreshState"_attr = beginRefreshState,
             "endRefreshState"_attr = endRefreshState);
     }
diff --git a/src/mongo/db/s/sharded_connection_info.cpp b/src/mongo/db/s/sharded_connection_info.cpp
index 2624858265c..4d15e94b825 100644
--- a/src/mongo/db/s/sharded_connection_info.cpp
+++ b/src/mongo/db/s/sharded_connection_info.cpp
@@ -51,7 +51,7 @@ ShardedConnectionInfo* ShardedConnectionInfo::get(Client* client, bool create) {
     auto& current = clientSCI(client);

     if (!current && create) {
-        LOGV2_DEBUG(22060, 1, "entering shard mode for connection");
+        LOGV2_DEBUG(22060, 1, "Entering shard mode for connection");
         current.emplace();
     }
diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp
index c0d8223a467..8e8a35de617 100644
--- a/src/mongo/db/s/sharding_initialization_mongod.cpp
+++ b/src/mongo/db/s/sharding_initialization_mongod.cpp
@@ -108,16 +108,18 @@ public:
         if (ErrorCodes::isCancelationError(status.code())) {
             LOGV2_DEBUG(22067,
                         2,
-                        "Unable to schedule confirmed set update due to {status}",
-                        "status"_attr = status);
+                        "Unable to schedule confirmed replica set update due to {error}",
+                        "Unable to schedule confirmed replica set update",
+                        "error"_attr = status);
             return;
         }
         invariant(status);

         try {
             LOGV2(22068,
-                  "Updating config server with confirmed set {connStr}",
-                  "connStr"_attr = connStr);
+                  "Updating config server with confirmed replica set {connectionString}",
+                  "Updating config server with confirmed replica set",
+                  "connectionString"_attr = connStr);

             Grid::get(serviceContext)->shardRegistry()->updateReplSetHosts(connStr);

             if (MONGO_unlikely(failUpdateShardIdentityConfigString.shouldFail())) {
@@ -139,7 +141,7 @@ public:
                 ShardingInitializationMongoD::updateShardIdentityConfigString(opCtx.get(),
                                                                               connStr);
             } catch (const ExceptionForCat<ErrorCategory::ShutdownError>& e) {
-                LOGV2(22069, "Unable to update config server due to {e}", "e"_attr = e);
+                LOGV2(22069, "Unable to update config server", "error"_attr = e);
             }
         });
     }
@@ -149,8 +151,8 @@ public:
         } catch (const DBException& ex) {
             LOGV2_DEBUG(22070,
                         2,
-                        "Unable to update config server with possible set due to {ex}",
-                        "ex"_attr = ex);
+                        "Unable to update config server with possible replica set",
+                        "error"_attr = ex);
         }
     }
     void onDroppedSet(const Key&) noexcept final {}
@@ -189,10 +191,9 @@ void ShardingInitializationMongoD::initializeShardingEnvironmentOnShardServer(
     Grid::get(opCtx)->setShardingInitialized();

     LOGV2(22071,
-          "Finished initializing sharding components for {isStandaloneOrPrimary_primary_secondary} "
-          "node.",
-          "isStandaloneOrPrimary_primary_secondary"_attr =
-              (isStandaloneOrPrimary ? "primary" : "secondary"));
+          "Finished initializing sharding components for {memberState} node.",
+          "Finished initializing sharding components",
+          "memberState"_attr = (isStandaloneOrPrimary ? "primary" : "secondary"));
 }

 ShardingInitializationMongoD::ShardingInitializationMongoD()
@@ -286,11 +287,13 @@ bool ShardingInitializationMongoD::initializeShardingAwarenessIfNeeded(Operation
         if (!foundShardIdentity) {
             LOGV2_WARNING(22074,
                           "Started with --shardsvr, but no shardIdentity document was found on "
-                          "disk in {NamespaceString_kServerConfigurationNamespace}. This most "
+                          "disk in {namespace}. This most "
                           "likely means this server has not yet been added to a "
                           "sharded cluster.",
-                          "NamespaceString_kServerConfigurationNamespace"_attr =
-                              NamespaceString::kServerConfigurationNamespace);
+                          "Started with --shardsvr, but no shardIdentity document was found on "
+                          "disk. This most likely means this server has not yet been added to a "
+                          "sharded cluster",
+                          "namespace"_attr = NamespaceString::kServerConfigurationNamespace);
             return false;
         }
@@ -312,10 +315,10 @@ bool ShardingInitializationMongoD::initializeShardingAwarenessIfNeeded(Operation
             LOGV2_WARNING(
                 22075,
                 "Not started with --shardsvr, but a shardIdentity document was found "
-                "on disk in {NamespaceString_kServerConfigurationNamespace}: {shardIdentityBSON}",
-                "NamespaceString_kServerConfigurationNamespace"_attr =
-                    NamespaceString::kServerConfigurationNamespace,
-                "shardIdentityBSON"_attr = shardIdentityBSON);
+                "on disk in {namespace}: {shardIdentityDocument}",
+                "Not started with --shardsvr, but a shardIdentity document was found on disk",
+                "namespace"_attr = NamespaceString::kServerConfigurationNamespace,
+                "shardIdentityDocument"_attr = shardIdentityBSON);
         }
         return false;
     }
@@ -331,8 +334,9 @@ void ShardingInitializationMongoD::initializeFromShardIdentity(
             "Invalid shard identity document found when initializing sharding state");

     LOGV2(22072,
-          "initializing sharding state with: {shardIdentity}",
-          "shardIdentity"_attr = shardIdentity);
+          "Initializing sharding state with: {initialShardIdentity}",
+          "Initializing sharding state",
+          "initialShardIdentity"_attr = shardIdentity);

     const auto& configSvrConnStr = shardIdentity.getConfigsvrConnectionString();
@@ -384,13 +388,15 @@ void ShardingInitializationMongoD::updateShardIdentityConfigString(
         auto result = update(opCtx, autoDb.getDb(), updateReq);
         if (result.numMatched == 0) {
             LOGV2_WARNING(22076,
-                          "failed to update config string of shard identity document because it "
-                          "does not exist. This shard could have been removed from the cluster");
+                          "Failed to update config server connection string of shard identity "
+                          "document because it does not exist. This shard could have been removed "
+                          "from the cluster");
         } else {
             LOGV2_DEBUG(22073,
                         2,
                         "Updated config server connection string in shardIdentity document "
-                        "to{newConnectionString}",
+                        "to {newConnectionString}",
+                        "Updated config server connection string in shardIdentity document",
                         "newConnectionString"_attr = newConnectionString);
         }
     } catch (const DBException& exception) {
@@ -398,9 +404,10 @@ void ShardingInitializationMongoD::updateShardIdentityConfigString(
         if (!ErrorCodes::isNotMasterError(status.code())) {
             LOGV2_WARNING(22077,
                           "Error encountered while trying to update config connection string to "
-                          "{newConnectionString}{causedBy_status}",
+                          "{newConnectionString} {error}",
+                          "Error encountered while trying to update config connection string",
                           "newConnectionString"_attr = newConnectionString.toString(),
-                          "causedBy_status"_attr = causedBy(redact(status)));
+                          "error"_attr = redact(status));
         }
     }
 }
diff --git a/src/mongo/db/s/sharding_logging.cpp b/src/mongo/db/s/sharding_logging.cpp
index 621b176a1ab..c3e67354c9a 100644
--- a/src/mongo/db/s/sharding_logging.cpp
+++ b/src/mongo/db/s/sharding_logging.cpp
@@ -81,8 +81,9 @@ Status ShardingLogging::logAction(OperationContext* opCtx,
             _actionLogCollectionCreated.store(1);
         } else {
             LOGV2(22078,
-                  "couldn't create config.actionlog collection:{causedBy_result}",
-                  "causedBy_result"_attr = causedBy(result));
+                  "Couldn't create config.actionlog collection: {error}",
+                  "Couldn't create config.actionlog collection",
+                  "error"_attr = result);
             return result;
         }
     }
@@ -109,8 +110,9 @@ Status ShardingLogging::logChangeChecked(OperationContext* opCtx,
             _changeLogCollectionCreated.store(1);
         } else {
             LOGV2(22079,
-                  "couldn't create config.changelog collection:{causedBy_result}",
-                  "causedBy_result"_attr = causedBy(result));
+                  "Couldn't create config.changelog collection: {error}",
+                  "Couldn't create config.changelog collection",
+                  "error"_attr = result);
             return result;
         }
     }
@@ -149,9 +151,10 @@ Status ShardingLogging::_log(OperationContext* opCtx,
     BSONObj changeLogBSON = changeLog.toBSON();

     LOGV2(22080,
-          "about to log metadata event into {logCollName}: {changeLogBSON}",
-          "logCollName"_attr = logCollName,
-          "changeLogBSON"_attr = redact(changeLogBSON));
+          "About to log metadata event into {namespace}: {event}",
+          "About to log metadata event",
+          "namespace"_attr = logCollName,
+          "event"_attr = redact(changeLogBSON));

     const NamespaceString nss("config", logCollName);
     Status result = Grid::get(opCtx)->catalogClient()->insertConfigDocument(
@@ -160,10 +163,11 @@ Status ShardingLogging::_log(OperationContext* opCtx,
     if (!result.isOK()) {
         LOGV2_WARNING(22081,
                       "Error encountered while logging config change with ID [{changeId}] into "
-                      "collection {logCollName}: {result}",
-                      "changeId"_attr = changeId,
-                      "logCollName"_attr = logCollName,
-                      "result"_attr = redact(result));
+                      "collection {namespace}: {error}",
+                      "Error encountered while logging config change",
+                      "changeId"_attr = changeId,
+                      "namespace"_attr = logCollName,
+                      "error"_attr = redact(result));
     }

     return result;
diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp
index bf726297d0c..748e6701a2b 100644
--- a/src/mongo/db/s/sharding_state.cpp
+++ b/src/mongo/db/s/sharding_state.cpp
@@ -70,8 +70,9 @@ void ShardingState::setInitialized(ShardId shardId, OID clusterId) {
 void ShardingState::setInitialized(Status failedStatus) {
     invariant(!failedStatus.isOK());
     LOGV2(22082,
-          "Failed to initialize sharding components{causedBy_failedStatus}",
-          "causedBy_failedStatus"_attr = causedBy(failedStatus));
+          "Failed to initialize sharding components {error}",
+          "Failed to initialize sharding components",
+          "error"_attr = failedStatus);

     stdx::unique_lock<Latch> ul(_mutex);
     invariant(_getInitializationState() == InitializationState::kNew);
diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp
index f14e2b1e279..99c00c7faab 100644
--- a/src/mongo/db/s/sharding_state_recovery.cpp
+++ b/src/mongo/db/s/sharding_state_recovery.cpp
@@ -153,8 +153,9 @@ Status modifyRecoveryDocument(OperationContext* opCtx,

         LOGV2_DEBUG(22083,
                     1,
-                    "Changing sharding recovery document {updateObj}",
-                    "updateObj"_attr = redact(updateObj));
+                    "Changing sharding recovery document {update}",
+                    "Changing sharding recovery document",
+                    "update"_attr = redact(updateObj));

         UpdateRequest updateReq(NamespaceString::kServerConfigurationNamespace);
         updateReq.setQuery(RecoveryDocument::getQuery());
@@ -200,8 +201,9 @@ void ShardingStateRecovery::endMetadataOp(OperationContext* opCtx) {
         modifyRecoveryDocument(opCtx, RecoveryDocument::Decrement, WriteConcernOptions());
     if (!status.isOK()) {
         LOGV2_WARNING(22088,
-                      "Failed to decrement minOpTimeUpdaters due to {status}",
-                      "status"_attr = redact(status));
+                      "Failed to decrement minOpTimeUpdaters due to {error}",
+                      "Failed to decrement minOpTimeUpdaters",
+                      "error"_attr = redact(status));
     }
 }
@@ -230,6 +232,7 @@ Status ShardingStateRecovery::recover(OperationContext* opCtx) {

     LOGV2(22084,
           "Sharding state recovery process found document {recoveryDoc}",
+          "Sharding state recovery process found document",
           "recoveryDoc"_attr = redact(recoveryDoc.toBSON()));

     if (!recoveryDoc.getMinOpTimeUpdaters()) {
@@ -239,19 +242,22 @@ Status ShardingStateRecovery::recover(OperationContext* opCtx) {
         if (prevOpTime) {
             LOGV2(22085,
                   "No in flight metadata change operations, so config server optime updated from "
-                  "{prevOpTime} to {recoveryDoc_getMinOpTime}",
-                  "prevOpTime"_attr = *prevOpTime,
-                  "recoveryDoc_getMinOpTime"_attr = recoveryDoc.getMinOpTime());
+                  "{prevConfigServerMinOpTime} to {newConfigServerMinOpTime}",
+                  "No in flight metadata change operations, so config server optime updated",
+                  "prevConfigServerMinOpTime"_attr = *prevOpTime,
+                  "newConfigServerMinOpTime"_attr = recoveryDoc.getMinOpTime());
         }
         return Status::OK();
     }

     LOGV2(
         22086,
-        "Sharding state recovery document indicates there were {recoveryDoc_getMinOpTimeUpdaters} "
+        "Sharding state recovery document indicates there were {inProgressMetadataOperationCount} "
         "metadata change operations in flight. Contacting the config server primary in order "
         "to retrieve the most recent opTime.",
-        "recoveryDoc_getMinOpTimeUpdaters"_attr = recoveryDoc.getMinOpTimeUpdaters());
+        "Sharding state recovery document indicates there were metadata change operations in "
+        "flight. Contacting the config server primary in order to retrieve the most recent opTime",
+        "inProgressMetadataOperationCount"_attr = recoveryDoc.getMinOpTimeUpdaters());

     // Need to fetch the latest uptime from the config server, so do a logging write
     Status status = ShardingLogging::get(opCtx)->logChangeChecked(
@@ -264,15 +270,17 @@ Status ShardingStateRecovery::recover(OperationContext* opCtx) {
         return status;

     LOGV2(22087,
-          "Sharding state recovered. New config server opTime is {grid_configOpTime}",
-          "grid_configOpTime"_attr = grid->configOpTime());
+          "Sharding state recovered. New config server opTime is {newConfigServerOpTime}",
+          "Sharding state recovered",
+          "newConfigServerOpTime"_attr = grid->configOpTime());

     // Finally, clear the recovery document so next time we don't need to recover
     status = modifyRecoveryDocument(opCtx, RecoveryDocument::Clear, kLocalWriteConcern);
     if (!status.isOK()) {
         LOGV2_WARNING(22089,
-                      "Failed to reset sharding state recovery document due to {status}",
-                      "status"_attr = redact(status));
+                      "Failed to reset sharding state recovery document due to {error}",
+                      "Failed to reset sharding state recovery document",
+                      "error"_attr = redact(status));
     }

     return Status::OK();
diff --git a/src/mongo/db/s/shardsvr_shard_collection.cpp b/src/mongo/db/s/shardsvr_shard_collection.cpp
index d346c038729..2bbf98f14b5 100644
--- a/src/mongo/db/s/shardsvr_shard_collection.cpp
+++ b/src/mongo/db/s/shardsvr_shard_collection.cpp
@@ -94,8 +94,9 @@ const ReadPreferenceSetting kConfigReadSelector(ReadPreference::Nearest, TagSet{
 void uassertStatusOKWithWarning(const Status& status) {
     if (!status.isOK()) {
         LOGV2_WARNING(22103,
-                      "shardsvrShardCollection failed{causedBy_status}",
-                      "causedBy_status"_attr = causedBy(redact(status)));
+                      "shardsvrShardCollection failed {error}",
+                      "shardsvrShardCollection failed",
+                      "error"_attr = redact(status));
         uassertStatusOK(status);
     }
 }
@@ -347,7 +348,8 @@ void logStartShardCollection(OperationContext* opCtx,
                              const ShardsvrShardCollection& request,
                              const ShardCollectionTargetState& prerequisites,
                              const ShardId& dbPrimaryShardId) {
-    LOGV2(22100, "CMD: shardcollection: {cmdObj}", "cmdObj"_attr = cmdObj);
+    LOGV2(
+        22100, "CMD: shardcollection: {command}", "CMD: shardcollection", "command"_attr = cmdObj);

     audit::logShardCollection(
         opCtx->getClient(), nss.ns(), prerequisites.shardKeyPattern.toBSON(), request.getUnique());
@@ -595,11 +597,12 @@ UUID shardCollection(OperationContext* opCtx,
     }

     LOGV2(22101,
-          "Created {initialChunks_chunks_size} chunk(s) for: {nss}, producing collection version "
-          "{initialChunks_collVersion}",
-          "initialChunks_chunks_size"_attr = initialChunks.chunks.size(),
-          "nss"_attr = nss,
-          "initialChunks_collVersion"_attr = initialChunks.collVersion());
+          "Created {numInitialChunks} chunk(s) for: {namespace}, producing collection version "
+          "{initialCollectionVersion}",
+          "Created initial chunk(s)",
+          "numInitialChunks"_attr = initialChunks.chunks.size(),
+          "namespace"_attr = nss,
+          "initialCollectionVersion"_attr = initialChunks.collVersion());

     ShardingLogging::get(opCtx)->logChange(