author     Lamont Nelson <lamont.nelson@Lamonts-MBP.fios-router.home>    2020-03-18 23:14:08 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>              2020-04-01 15:36:39 +0000
commit     96d3b79232ceca57eb8206f5fbbe52a8050c1936 (patch)
tree       697c932ed7ed10d160829b636a67fdcf6a3efebd
parent     80ae3c29b4e293d3929ef05a5e8cbfa69dfae883 (diff)
SERVER-46799: update log lines for style guide
10 files changed, 124 insertions, 73 deletions
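The conversion applied across all ten files is visible in the added lines below: each LOGV2 / LOGV2_DEBUG call site keeps a {name}-substituted format string, gains a second constant message with the attributes omitted, and switches to descriptive "name"_attr bindings (db, shardId, namespace, error) instead of names derived from C++ expressions. The following is a minimal sketch of that before/after shape only; the log ID 99999, the helper function, and the surrounding includes are illustrative assumptions, not code from this commit:

// Sketch of the style-guide form this commit converts call sites to (not part of the commit).
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding

#include "mongo/base/status.h"
#include "mongo/base/string_data.h"
#include "mongo/logv2/log.h"
#include "mongo/s/shard_id.h"

namespace mongo {

// Hypothetical helper used only to illustrate the log-line conversion.
void logMovePrimaryFailureExample(StringData dbName, const ShardId& to, const Status& status) {
    // Old style (before this commit): attribute names copied from C++ expressions and a message
    // that exists only in substituted form, e.g.
    //   LOGV2(21921, "Could not move database '{dbname}' to shard '{to}'", "dbname"_attr = ...);
    //
    // New style per the style guide: a format string with named placeholders, then a constant
    // attribute-free message, then descriptive "name"_attr bindings.
    LOGV2(99999,  // hypothetical ID; real log IDs are unique per call site
          "Could not move database {db} to shard {shardId}: {error}",
          "Could not move database to shard",
          "db"_attr = dbName,
          "shardId"_attr = to,
          "error"_attr = status);
}

}  // namespace mongo

The constant second string gives log search and aggregation a stable message that does not vary with attribute values, which appears to be the motivation for adding it alongside the existing format string.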
diff --git a/src/mongo/db/s/config/configsvr_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
index 53eb5aa552f..0aec2fb5288 100644
--- a/src/mongo/db/s/config/configsvr_move_primary_command.cpp
+++ b/src/mongo/db/s/config/configsvr_move_primary_command.cpp
@@ -159,12 +159,11 @@ public:
         auto toShardStatus = shardRegistry->getShard(opCtx, to);
         if (!toShardStatus.isOK()) {
             LOGV2(21921,
-                  "Could not move database '{dbname}' to shard "
-                  "'{to}{causedBy_toShardStatus_getStatus}",
-                  "dbname"_attr = dbname,
-                  "to"_attr = to,
-                  "causedBy_toShardStatus_getStatus"_attr =
-                      causedBy(toShardStatus.getStatus()));
+                  "Could not move database {db} to shard {shardId}: {error}",
+                  "Could not move database to shard",
+                  "db"_attr = dbname,
+                  "shardId"_attr = to,
+                  "error"_attr = toShardStatus.getStatus());
             uassertStatusOKWithContext(toShardStatus.getStatus(),
                                        str::stream() << "Could not move database '" << dbname
                                                      << "' to shard '" << to << "'");
diff --git a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
index bff5dee8401..e6bbec377eb 100644
--- a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
+++ b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp
@@ -152,6 +152,7 @@ public:
 
         LOGV2(21922,
               "CMD: refineCollectionShardKey: {request}",
+              "CMD: refineCollectionShardKey",
               "request"_attr = request().toBSON({}));
 
         audit::logRefineCollectionShardKey(opCtx->getClient(), nss.ns(), proposedKey);
diff --git a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
index 4c0dc80a658..0d3015f15f3 100644
--- a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
+++ b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp
@@ -116,7 +116,11 @@ public:
            try {
                return shardingCatalogManager->removeShard(opCtx, shardId);
            } catch (const DBException& ex) {
-                LOGV2(21923, "Failed to remove shard due to {ex}", "ex"_attr = redact(ex));
+                LOGV2(21923,
+                      "Failed to remove shard {shardId} due to {error}",
+                      "Failed to remove shard",
+                      "shardId"_attr = shardId,
+                      "error"_attr = redact(ex));
                throw;
            }
        }();
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
index 7e05e1437a7..29639e79e6c 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp
@@ -422,16 +422,27 @@ void ShardingCatalogManager::dropCollection(OperationContext* opCtx, const Names
                                            BSONObj(),
                                            ShardingCatalogClient::kMajorityWriteConcern));
 
-    LOGV2_DEBUG(21924, 1, "dropCollection {nss_ns} started", "nss_ns"_attr = nss.ns());
+    LOGV2_DEBUG(21924,
+                1,
+                "dropCollection {namespace} started",
+                "dropCollection started",
+                "namespace"_attr = nss.ns());
 
     sendDropCollectionToAllShards(opCtx, nss);
 
-    LOGV2_DEBUG(21925, 1, "dropCollection {nss_ns} shard data deleted", "nss_ns"_attr = nss.ns());
+    LOGV2_DEBUG(21925,
+                1,
+                "dropCollection {namespace} shard data deleted",
+                "dropCollection shard data deleted",
+                "namespace"_attr = nss.ns());
 
     removeChunksAndTagsForDroppedCollection(opCtx, nss);
 
-    LOGV2_DEBUG(
-        21926, 1, "dropCollection {nss_ns} chunk and tag data deleted", "nss_ns"_attr = nss.ns());
+    LOGV2_DEBUG(21926,
+                1,
+                "dropCollection {namespace} chunk and tag data deleted",
+                "dropCollection chunk and tag data deleted",
+                "namespace"_attr = nss.ns());
 
     // Mark the collection as dropped
     CollectionType coll;
@@ -444,12 +455,19 @@ void ShardingCatalogManager::dropCollection(OperationContext* opCtx, const Names
     uassertStatusOK(ShardingCatalogClientImpl::updateShardingCatalogEntryForCollection(
         opCtx, nss, coll, upsert));
 
-    LOGV2_DEBUG(
-        21927, 1, "dropCollection {nss_ns} collection marked as dropped", "nss_ns"_attr = nss.ns());
+    LOGV2_DEBUG(21927,
+                1,
+                "dropCollection {namespace} collection marked as dropped",
+                "dropCollection collection marked as dropped",
+                "namespace"_attr = nss.ns());
 
     sendSSVToAllShards(opCtx, nss);
 
-    LOGV2_DEBUG(21928, 1, "dropCollection {nss_ns} completed", "nss_ns"_attr = nss.ns());
+    LOGV2_DEBUG(21928,
+                1,
+                "dropCollection {namespace} completed",
+                "dropCollection completed",
+                "namespace"_attr = nss.ns());
 
     ShardingLogging::get(opCtx)->logChange(
         opCtx, "dropCollection", nss.ns(), BSONObj(), ShardingCatalogClient::kMajorityWriteConcern);
@@ -460,8 +478,9 @@ void ShardingCatalogManager::ensureDropCollectionCompleted(OperationContext* opC
 
     LOGV2_DEBUG(21929,
                 1,
-                "Ensuring config entries for {nss_ns} from previous dropCollection are cleared",
-                "nss_ns"_attr = nss.ns());
+                "Ensuring config entries for {namespace} from previous dropCollection are cleared",
+                "Ensuring config entries from previous dropCollection are cleared",
+                "namespace"_attr = nss.ns());
     sendDropCollectionToAllShards(opCtx, nss);
     removeChunksAndTagsForDroppedCollection(opCtx, nss);
     sendSSVToAllShards(opCtx, nss);
@@ -496,10 +515,10 @@ void ShardingCatalogManager::generateUUIDsForExistingShardedCollections(Operatio
     }
 
     // Generate and persist a new UUID for each collection that did not have a UUID.
-    LOGV2(
-        21931,
-        "generating UUIDs for {shardedColls_size} sharded collections that do not yet have a UUID",
-        "shardedColls_size"_attr = shardedColls.size());
+    LOGV2(21931,
+          "Generating UUIDs for {collectionCount} sharded collections that do not yet have a UUID",
+          "Generating UUIDs for sharded collections that do not yet have a UUID",
+          "collectionCount"_attr = shardedColls.size());
     for (auto& coll : shardedColls) {
         auto collType = uassertStatusOK(CollectionType::fromBSON(coll));
         invariant(!collType.getUUID());
@@ -511,10 +530,11 @@ void ShardingCatalogManager::generateUUIDsForExistingShardedCollections(Operatio
             opCtx, collType.getNs(), collType, false /* upsert */));
         LOGV2_DEBUG(21932,
                     2,
-                    "updated entry in config.collections for sharded collection {collType_getNs} "
-                    "with generated UUID {uuid}",
-                    "collType_getNs"_attr = collType.getNs(),
-                    "uuid"_attr = uuid);
+                    "Updated entry in config.collections for sharded collection {namespace} "
+                    "with UUID {generatedUUID}",
+                    "Updated entry in config.collections for sharded collection",
+                    "namespace"_attr = collType.getNs(),
+                    "generatedUUID"_attr = uuid);
     }
 }
@@ -629,11 +649,12 @@ void ShardingCatalogManager::refineCollectionShardKey(OperationContext* opCtx,
                                                        txnNumber));
 
     LOGV2(21933,
-          "refineCollectionShardKey: updated collection entry for '{nss_ns}': took "
-          "{executionTimer_millis} ms. Total time taken: {totalTimer_millis} ms.",
-          "nss_ns"_attr = nss.ns(),
-          "executionTimer_millis"_attr = executionTimer.millis(),
-          "totalTimer_millis"_attr = totalTimer.millis());
+          "refineCollectionShardKey updated collection entry for {namespace}: took "
+          "{durationMillis} ms. Total time taken: {totalTimeMillis} ms.",
+          "refineCollectionShardKey updated collection entry",
+          "namespace"_attr = nss.ns(),
+          "durationMillis"_attr = executionTimer.millis(),
+          "totalTimeMillis"_attr = totalTimer.millis());
     executionTimer.reset();
 
     // Update all config.chunks entries for the given namespace by setting (i) their epoch to
@@ -668,11 +689,12 @@ void ShardingCatalogManager::refineCollectionShardKey(OperationContext* opCtx,
                                                        txnNumber));
 
     LOGV2(21935,
-          "refineCollectionShardKey: updated chunk entries for '{nss_ns}': took "
-          "{executionTimer_millis} ms. Total time taken: {totalTimer_millis} ms.",
-          "nss_ns"_attr = nss.ns(),
-          "executionTimer_millis"_attr = executionTimer.millis(),
-          "totalTimer_millis"_attr = totalTimer.millis());
+          "refineCollectionShardKey: updated chunk entries for {namespace}: took "
+          "{durationMillis} ms. Total time taken: {totalTimeMillis} ms.",
+          "refineCollectionShardKey: updated chunk entries",
+          "namespace"_attr = nss.ns(),
+          "durationMillis"_attr = executionTimer.millis(),
+          "totalTimeMillis"_attr = totalTimer.millis());
     executionTimer.reset();
 
     // Update all config.tags entries for the given namespace by setting their bounds for each
@@ -698,11 +720,12 @@ void ShardingCatalogManager::refineCollectionShardKey(OperationContext* opCtx,
                                                        txnNumber));
 
     LOGV2(21936,
-          "refineCollectionShardKey: updated zone entries for '{nss_ns}': took "
-          "{executionTimer_millis} ms. Total time taken: {totalTimer_millis} ms.",
-          "nss_ns"_attr = nss.ns(),
-          "executionTimer_millis"_attr = executionTimer.millis(),
-          "totalTimer_millis"_attr = totalTimer.millis());
+          "refineCollectionShardKey: updated zone entries for {namespace}: took "
+          "{durationMillis} ms. Total time taken: {totalTimeMillis} ms.",
+          "refineCollectionShardKey: updated zone entries",
+          "namespace"_attr = nss.ns(),
+          "durationMillis"_attr = executionTimer.millis(),
+          "totalTimeMillis"_attr = totalTimer.millis());
 
     if (MONGO_unlikely(hangRefineCollectionShardKeyBeforeCommit.shouldFail())) {
         LOGV2(21937, "Hit hangRefineCollectionShardKeyBeforeCommit failpoint");
@@ -723,11 +746,13 @@ void ShardingCatalogManager::refineCollectionShardKey(OperationContext* opCtx,
     try {
         triggerFireAndForgetShardRefreshes(opCtx, nss);
     } catch (const DBException& ex) {
-        LOGV2(51798,
-              "refineCollectionShardKey: failed to best-effort refresh all shards containing "
-              "chunks in '{ns}'",
-              "error"_attr = ex.toStatus(),
-              "ns"_attr = nss.ns());
+        LOGV2(
+            51798,
+            "refineCollectionShardKey: failed to best-effort refresh all shards containing chunks "
+            "in {namespace}",
+            "refineCollectionShardKey: failed to best-effort refresh all shards containing chunks",
+            "error"_attr = ex.toStatus(),
+            "namespace"_attr = nss.ns());
     }
 }
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index 023408dcd71..98fc81a30f3 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -162,7 +162,10 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
         DatabaseType db(
             dbName.toString(), shardPtr->getId(), false, databaseVersion::makeNew());
 
-        LOGV2(21938, "Registering new database {db} in sharding catalog", "db"_attr = db);
+        LOGV2(21938,
+              "Registering new database {db} in sharding catalog",
+              "Registering new database in sharding catalog",
+              "db"_attr = db);
 
         // Do this write with majority writeConcern to guarantee that the shard sees the write
         // when it receives the _flushDatabaseCacheUpdates.
@@ -233,7 +236,10 @@ void ShardingCatalogManager::enableSharding(OperationContext* opCtx,
                                             Milliseconds{30000}),
         &unusedResult));
 
-    LOGV2(21939, "Enabling sharding for database [{dbName}] in config db", "dbName"_attr = dbName);
+    LOGV2(21939,
+          "Persisted sharding enabled for database {db}",
+          "Persisted sharding enabled for database",
+          "db"_attr = dbName);
 
     uassertStatusOK(Grid::get(opCtx)->catalogClient()->updateConfigDocument(
         opCtx,
@@ -323,9 +329,10 @@ Status ShardingCatalogManager::commitMovePrimary(OperationContext* opCtx,
 
     if (!updateStatus.isOK()) {
         LOGV2(21940,
-              "error committing movePrimary: {dbname}{causedBy_updateStatus_getStatus}",
-              "dbname"_attr = dbname,
-              "causedBy_updateStatus_getStatus"_attr = causedBy(redact(updateStatus.getStatus())));
+              "Error committing movePrimary for {db}: {error}",
+              "Error committing movePrimary",
+              "db"_attr = dbname,
+              "error"_attr = redact(updateStatus.getStatus()));
         return updateStatus.getStatus();
     }
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
index 4675f7855ad..8acfb24c7d6 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp
@@ -162,8 +162,9 @@ StatusWith<Shard::CommandResponse> ShardingCatalogManager::_runCommandForAddShar
 
     if (response.status == ErrorCodes::ExceededTimeLimit) {
         LOGV2(21941,
-              "Operation timed out with status {response_status}",
-              "response_status"_attr = redact(response.status));
+              "Operation timed out with {error}",
+              "Operation timed out",
+              "error"_attr = redact(response.status));
     }
 
     if (!response.isOK()) {
@@ -675,7 +676,8 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
     }
 
     LOGV2(21942,
-          "going to insert new entry for shard into config.shards: {shardType}",
+          "Going to insert new entry for shard into config.shards: {shardType}",
+          "Going to insert new entry for shard into config.shards",
           "shardType"_attr = shardType.toString());
 
     Status result = Grid::get(opCtx)->catalogClient()->insertConfigDocument(
@@ -685,9 +687,10 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
             ShardingCatalogClient::kLocalWriteConcern);
         if (!result.isOK()) {
             LOGV2(21943,
-                  "error adding shard: {shardType} err: {result_reason}",
+                  "Error adding shard: {shardType} err: {error}",
+                  "Error adding shard",
                  "shardType"_attr = shardType.toBSON(),
-                  "result_reason"_attr = result.reason());
+                  "error"_attr = result.reason());
            return result;
        }
    }
@@ -706,10 +709,10 @@ StatusWith<std::string> ShardingCatalogManager::addShard(
                 ShardingCatalogClient::kLocalWriteConcern);
             if (!status.isOK()) {
                 LOGV2(21944,
-                      "adding shard {shardConnectionString} even though could not add database "
-                      "{dbName}",
-                      "shardConnectionString"_attr = shardConnectionString.toString(),
-                      "dbName"_attr = dbName);
+                      "Adding shard {connectionString} even though could not add database {db}",
+                      "Adding shard even though we could not add database",
+                      "connectionString"_attr = shardConnectionString.toString(),
+                      "db"_attr = dbName);
             }
         }
     }
@@ -785,7 +788,10 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
     auto* const catalogClient = Grid::get(opCtx)->catalogClient();
 
     if (!isShardCurrentlyDraining) {
-        LOGV2(21945, "going to start draining shard: {name}", "name"_attr = name);
+        LOGV2(21945,
+              "Going to start draining shard: {shardId}",
+              "Going to start draining shard",
+              "shardId"_attr = name);
 
         // Record start in changelog
         uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked(
@@ -823,9 +829,13 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
 
     if (chunkCount > 0 || databaseCount > 0) {
         // Still more draining to do
-        LOGV2(21946, "chunkCount: {chunkCount}", "chunkCount"_attr = chunkCount);
-        LOGV2(21947, "databaseCount: {databaseCount}", "databaseCount"_attr = databaseCount);
-        LOGV2(21948, "jumboCount: {jumboCount}", "jumboCount"_attr = jumboCount);
+        LOGV2(21946,
+              "removeShard: draining chunkCount {chunkCount}; databaseCount {databaseCount}; "
+              "jumboCount {jumboCount}",
+              "removeShard: draining",
+              "chunkCount"_attr = chunkCount,
+              "databaseCount"_attr = databaseCount,
+              "jumboCount"_attr = jumboCount);
 
         return {RemoveShardProgress::ONGOING,
                 boost::optional<RemoveShardProgress::DrainingShardUsage>(
@@ -833,7 +843,8 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx,
     }
 
     // Draining is done, now finish removing the shard.
-    LOGV2(21949, "going to remove shard: {name}", "name"_attr = name);
+    LOGV2(
+        21949, "Going to remove shard: {shardId}", "Going to remove shard", "shardId"_attr = name);
 
     audit::logRemoveShard(opCtx->getClient(), name);
 
     uassertStatusOKWithContext(
diff --git a/src/mongo/db/s/database_sharding_state.cpp b/src/mongo/db/s/database_sharding_state.cpp
index 4052917dcec..d5afe01fb36 100644
--- a/src/mongo/db/s/database_sharding_state.cpp
+++ b/src/mongo/db/s/database_sharding_state.cpp
@@ -121,11 +121,10 @@ void DatabaseShardingState::setDbVersion(OperationContext* opCtx,
                                          DSSLock&) {
     invariant(opCtx->lockState()->isDbLockedForMode(_dbName, MODE_X));
     LOGV2(21950,
-          "setting this node's cached database version for {dbName} to "
-          "{newDbVersion_newDbVersion_BSONObj}",
-          "dbName"_attr = _dbName,
-          "newDbVersion_newDbVersion_BSONObj"_attr =
-              (newDbVersion ? newDbVersion->toBSON() : BSONObj()));
+          "Setting this node's cached database version for {db} to {newDbVersion}",
+          "Setting this node's cached database version",
+          "db"_attr = _dbName,
+          "newDbVersion"_attr = (newDbVersion ? newDbVersion->toBSON() : BSONObj()));
     _dbVersion = newDbVersion;
 }
diff --git a/src/mongo/db/s/flush_database_cache_updates_command.cpp b/src/mongo/db/s/flush_database_cache_updates_command.cpp
index 5b5ff581996..4ad4d87c1a6 100644
--- a/src/mongo/db/s/flush_database_cache_updates_command.cpp
+++ b/src/mongo/db/s/flush_database_cache_updates_command.cpp
@@ -135,8 +135,9 @@ public:
             if (request().getSyncFromConfig()) {
                 LOGV2_DEBUG(21981,
                             1,
-                            "Forcing remote routing table refresh for {dbName}",
-                            "dbName"_attr = _dbName());
+                            "Forcing remote routing table refresh for {db}",
+                            "Forcing remote routing table refresh",
+                            "db"_attr = _dbName());
                 forceDatabaseRefresh(opCtx, _dbName());
             }
diff --git a/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp b/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
index 8430f97e89f..aacf52b30aa 100644
--- a/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
+++ b/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp
@@ -132,8 +132,11 @@ public:
            oss.waitForMigrationCriticalSectionSignal(opCtx);
 
            if (request().getSyncFromConfig()) {
-                LOGV2_DEBUG(
-                    21982, 1, "Forcing remote routing table refresh for {ns}", "ns"_attr = ns());
+                LOGV2_DEBUG(21982,
+                            1,
+                            "Forcing remote routing table refresh for {namespace}",
+                            "Forcing remote routing table refresh",
+                            "namespace"_attr = ns());
                forceShardFilteringMetadataRefresh(opCtx, ns());
            }
diff --git a/src/mongo/db/s/merge_chunks_command.cpp b/src/mongo/db/s/merge_chunks_command.cpp
index b138ee2ef44..bb6294fa3f7 100644
--- a/src/mongo/db/s/merge_chunks_command.cpp
+++ b/src/mongo/db/s/merge_chunks_command.cpp
@@ -229,7 +229,8 @@ void mergeChunks(OperationContext* opCtx,
         checkMetadataForSuccess(opCtx, nss, epoch, ChunkRange(minKey, maxKey))) {
         LOGV2_DEBUG(21983,
                     1,
-                    "mergeChunk [{minKey},{maxKey}) has already been committed.",
+                    "mergeChunk interval [{minKey},{maxKey}) has already been committed",
+                    "mergeChunk interval has already been committed",
                     "minKey"_attr = redact(minKey),
                     "maxKey"_attr = redact(maxKey));
         return;