author     Tess Avitabile <tess.avitabile@mongodb.com>  2020-02-26 15:39:55 +0000
committer  evergreen <evergreen@mongodb.com>  2020-02-26 15:39:55 +0000
commit     ce4f6d6f6e0be8478e1f2f6f728dcc89f4b1e271 (patch)
tree       ab403b1b448488a2e9881551fdae0eae4ad157da /src/mongo/db
parent     dbc00195a18a3aecbcb7b254c3de2056fe25aa24 (diff)
SERVER-46074 Improve log attributes for repl directory and bridge.cpp
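
The change applied throughout the diff below is a rename of LOGV2 attribute names from machine-generated identifiers that leaked the C++ expression (e.g. "dbBSON"_attr, "getClonerName"_attr) to short names that describe the datum (e.g. "db"_attr, "cloner"_attr), with the matching placeholders in the message text updated and the log IDs left unchanged. A minimal sketch of the before/after shape, based on the first hunk; it assumes the mongo source tree's logv2 headers and is an illustration, not a standalone program:

    // Sketch only: compiles inside the mongo source tree, not on its own.
    #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication
    #include "mongo/bson/bsonobj.h"
    #include "mongo/logv2/log.h"

    namespace mongo::repl {
    void logExcludedDatabase(const BSONObj& dbBSON) {
        // Before: the attribute name mirrored the local variable name.
        // LOGV2_DEBUG(21056, 1,
        //             "Excluding database from the 'listDatabases' response: {dbBSON}",
        //             "dbBSON"_attr = dbBSON);

        // After: the attribute names the logged value, not the expression that produced it.
        LOGV2_DEBUG(21056, 1,
                    "Excluding database from the 'listDatabases' response: {db}",
                    "db"_attr = dbBSON);
    }
    }  // namespace mongo::repl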
Diffstat (limited to 'src/mongo/db')
-rw-r--r--  src/mongo/db/repl/all_database_cloner.cpp | 22
-rw-r--r--  src/mongo/db/repl/base_cloner.cpp | 32
-rw-r--r--  src/mongo/db/repl/bgsync.cpp | 60
-rw-r--r--  src/mongo/db/repl/collection_bulk_loader_impl.cpp | 6
-rw-r--r--  src/mongo/db/repl/collection_cloner.cpp | 21
-rw-r--r--  src/mongo/db/repl/data_replicator_external_state_impl.cpp | 16
-rw-r--r--  src/mongo/db/repl/database_cloner.cpp | 12
-rw-r--r--  src/mongo/db/repl/drop_pending_collection_reaper.cpp | 14
-rw-r--r--  src/mongo/db/repl/initial_syncer.cpp | 84
-rw-r--r--  src/mongo/db/repl/isself.cpp | 40
-rw-r--r--  src/mongo/db/repl/member_data.cpp | 8
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 34
-rw-r--r--  src/mongo/db/repl/oplog_applier.cpp | 6
-rw-r--r--  src/mongo/db/repl/oplog_applier_impl.cpp | 23
-rw-r--r--  src/mongo/db/repl/oplog_batcher.cpp | 4
-rw-r--r--  src/mongo/db/repl/oplog_fetcher.cpp | 30
-rw-r--r--  src/mongo/db/repl/repl_client_info.cpp | 4
-rw-r--r--  src/mongo/db/repl/repl_set_commands.cpp | 8
-rw-r--r--  src/mongo/db/repl/replication_consistency_markers_impl.cpp | 37
-rw-r--r--  src/mongo/db/repl/replication_coordinator_external_state_impl.cpp | 2
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl.cpp | 170
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp | 26
-rw-r--r--  src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp | 47
-rw-r--r--  src/mongo/db/repl/replication_process.cpp | 26
-rw-r--r--  src/mongo/db/repl/replication_recovery.cpp | 24
-rw-r--r--  src/mongo/db/repl/reporter.cpp | 20
-rw-r--r--  src/mongo/db/repl/roll_back_local_operations.cpp | 4
-rw-r--r--  src/mongo/db/repl/rollback_impl.cpp | 114
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp | 255
-rw-r--r--  src/mongo/db/repl/scatter_gather_runner.cpp | 6
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp | 12
-rw-r--r--  src/mongo/db/repl/sync_source_feedback.cpp | 9
-rw-r--r--  src/mongo/db/repl/sync_source_resolver.cpp | 63
-rw-r--r--  src/mongo/db/repl/task_runner.cpp | 4
-rw-r--r--  src/mongo/db/repl/topology_coordinator.cpp | 56
35 files changed, 613 insertions, 686 deletions
diff --git a/src/mongo/db/repl/all_database_cloner.cpp b/src/mongo/db/repl/all_database_cloner.cpp
index da25493374b..01b3fe81b08 100644
--- a/src/mongo/db/repl/all_database_cloner.cpp
+++ b/src/mongo/db/repl/all_database_cloner.cpp
@@ -107,16 +107,16 @@ BaseCloner::AfterStageBehavior AllDatabaseCloner::listDatabasesStage() {
LOGV2_DEBUG(21055,
1,
"Excluding database due to the 'listDatabases' response not containing a "
- "'name' field for this entry: {dbBSON}",
- "dbBSON"_attr = dbBSON);
+ "'name' field for this entry: {db}",
+ "db"_attr = dbBSON);
continue;
}
const auto& dbName = dbBSON["name"].str();
if (dbName == "local") {
LOGV2_DEBUG(21056,
1,
- "Excluding database from the 'listDatabases' response: {dbBSON}",
- "dbBSON"_attr = dbBSON);
+ "Excluding database from the 'listDatabases' response: {db}",
+ "db"_attr = dbBSON);
continue;
} else {
_databases.emplace_back(dbName);
@@ -153,17 +153,17 @@ void AllDatabaseCloner::postStage() {
if (dbStatus.isOK()) {
LOGV2_DEBUG(21057,
1,
- "Database clone for '{dbName}' finished: {dbStatus}",
+ "Database clone for '{dbName}' finished: {status}",
"dbName"_attr = dbName,
- "dbStatus"_attr = dbStatus);
+ "status"_attr = dbStatus);
} else {
LOGV2_WARNING(21060,
- "database '{dbName}' ({stats_databasesCloned_1} of {databases_size}) "
- "clone failed due to {dbStatus}",
+ "database '{dbName}' ({number} of {total}) "
+ "clone failed due to {status}",
"dbName"_attr = dbName,
- "stats_databasesCloned_1"_attr = (_stats.databasesCloned + 1),
- "databases_size"_attr = _databases.size(),
- "dbStatus"_attr = dbStatus.toString());
+ "number"_attr = (_stats.databasesCloned + 1),
+ "total"_attr = _databases.size(),
+ "status"_attr = dbStatus.toString());
setInitialSyncFailedStatus(dbStatus);
return;
}
diff --git a/src/mongo/db/repl/base_cloner.cpp b/src/mongo/db/repl/base_cloner.cpp
index cbad2e60b11..fcfa04d746a 100644
--- a/src/mongo/db/repl/base_cloner.cpp
+++ b/src/mongo/db/repl/base_cloner.cpp
@@ -93,8 +93,8 @@ Status BaseCloner::run() {
if (!_sharedData->getInitialSyncStatus(lk).isOK()) {
LOGV2(21065,
"Failing data clone because initial sync failed outside data clone: "
- "{sharedData_getInitialSyncStatus_lk}",
- "sharedData_getInitialSyncStatus_lk"_attr = _sharedData->getInitialSyncStatus(lk));
+ "{status}",
+ "status"_attr = _sharedData->getInitialSyncStatus(lk));
}
return _sharedData->getInitialSyncStatus(lk);
}
@@ -119,8 +119,8 @@ void BaseCloner::pauseForFuzzer(BaseClonerStage* stage) {
// initial_sync_test_fixture_test.js, so if you change it here you will need to change
// it there.
LOGV2(21066,
- "Collection Cloner scheduled a remote command on the {describeForFuzzer_stage}",
- "describeForFuzzer_stage"_attr = describeForFuzzer(stage));
+ "Collection Cloner scheduled a remote command on the {stage}",
+ "stage"_attr = describeForFuzzer(stage));
LOGV2(21067, "initialSyncFuzzerSynchronizationPoint1 fail point enabled.");
initialSyncFuzzerSynchronizationPoint1.pauseWhileSet();
@@ -135,9 +135,9 @@ void BaseCloner::pauseForFuzzer(BaseClonerStage* stage) {
BaseCloner::AfterStageBehavior BaseCloner::runStage(BaseClonerStage* stage) {
LOGV2_DEBUG(21069,
1,
- "Cloner {getClonerName} running stage {stage_getName}",
- "getClonerName"_attr = getClonerName(),
- "stage_getName"_attr = stage->getName());
+ "Cloner {cloner} running stage {stage}",
+ "cloner"_attr = getClonerName(),
+ "stage"_attr = stage->getName());
pauseForFuzzer(stage);
auto isThisStageFailPoint = [this, stage](const BSONObj& data) {
return data["stage"].str() == stage->getName() && isMyFailPoint(data);
@@ -145,9 +145,9 @@ BaseCloner::AfterStageBehavior BaseCloner::runStage(BaseClonerStage* stage) {
hangBeforeClonerStage.executeIf(
[&](const BSONObj& data) {
LOGV2(21070,
- "Cloner {getClonerName} hanging before running stage {stage_getName}",
- "getClonerName"_attr = getClonerName(),
- "stage_getName"_attr = stage->getName());
+ "Cloner {cloner} hanging before running stage {stage}",
+ "cloner"_attr = getClonerName(),
+ "stage"_attr = stage->getName());
while (!mustExit() && hangBeforeClonerStage.shouldFail(isThisStageFailPoint)) {
sleepmillis(100);
}
@@ -157,9 +157,9 @@ BaseCloner::AfterStageBehavior BaseCloner::runStage(BaseClonerStage* stage) {
hangAfterClonerStage.executeIf(
[&](const BSONObj& data) {
LOGV2(21071,
- "Cloner {getClonerName} hanging after running stage {stage_getName}",
- "getClonerName"_attr = getClonerName(),
- "stage_getName"_attr = stage->getName());
+ "Cloner {cloner} hanging after running stage {stage}",
+ "cloner"_attr = getClonerName(),
+ "stage"_attr = stage->getName());
while (!mustExit() && hangAfterClonerStage.shouldFail(isThisStageFailPoint)) {
sleepmillis(100);
}
@@ -167,9 +167,9 @@ BaseCloner::AfterStageBehavior BaseCloner::runStage(BaseClonerStage* stage) {
isThisStageFailPoint);
LOGV2_DEBUG(21072,
1,
- "Cloner {getClonerName} finished running stage {stage_getName}",
- "getClonerName"_attr = getClonerName(),
- "stage_getName"_attr = stage->getName());
+ "Cloner {cloner} finished running stage {stage}",
+ "cloner"_attr = getClonerName(),
+ "stage"_attr = stage->getName());
return afterStageBehavior;
}
diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp
index fd1bd90f603..8b2d5943ee6 100644
--- a/src/mongo/db/repl/bgsync.cpp
+++ b/src/mongo/db/repl/bgsync.cpp
@@ -216,8 +216,9 @@ void BackgroundSync::_run() {
sleepmillis(100); // sleep a bit to keep from hammering this thread with temp. errors.
} catch (const std::exception& e2) {
// redact(std::exception&) doesn't work
- LOGV2_FATAL(
- 21127, "sync producer exception: {e2_what}", "e2_what"_attr = redact(e2.what()));
+ LOGV2_FATAL(21127,
+ "sync producer exception: {exception}",
+ "exception"_attr = redact(e2.what()));
fassertFailed(28546);
}
}
@@ -337,10 +338,10 @@ void BackgroundSync::_produce() {
"Our newest OpTime : {lastOpTimeFetched}",
"lastOpTimeFetched"_attr = lastOpTimeFetched);
LOGV2(21082,
- "Earliest OpTime available is {syncSourceResp_earliestOpTimeSeen} from "
- "{syncSourceResp_getSyncSource}",
- "syncSourceResp_earliestOpTimeSeen"_attr = syncSourceResp.earliestOpTimeSeen,
- "syncSourceResp_getSyncSource"_attr = syncSourceResp.getSyncSource());
+ "Earliest OpTime available is {earliestOpTimeSeen} from "
+ "{syncSource}",
+ "earliestOpTimeSeen"_attr = syncSourceResp.earliestOpTimeSeen,
+ "syncSource"_attr = syncSourceResp.getSyncSource());
auto status = _replCoord->abortCatchupIfNeeded(
ReplicationCoordinator::PrimaryCatchUpConclusionReason::kFailedWithError);
if (!status.isOK()) {
@@ -366,8 +367,8 @@ void BackgroundSync::_produce() {
"Our newest OpTime : {lastOpTimeFetched}",
"lastOpTimeFetched"_attr = lastOpTimeFetched);
LOGV2(21085,
- "Earliest OpTime available is {syncSourceResp_earliestOpTimeSeen}",
- "syncSourceResp_earliestOpTimeSeen"_attr = syncSourceResp.earliestOpTimeSeen);
+ "Earliest OpTime available is {earliestOpTimeSeen}",
+ "earliestOpTimeSeen"_attr = syncSourceResp.earliestOpTimeSeen);
LOGV2(21086, "See http://dochub.mongodb.org/core/resyncingaverystalereplicasetmember");
// Activate maintenance mode and transition to RECOVERING.
@@ -382,12 +383,11 @@ void BackgroundSync::_produce() {
status = _replCoord->setFollowerMode(MemberState::RS_RECOVERING);
if (!status.isOK()) {
LOGV2_WARNING(21117,
- "Failed to transition into {MemberState_MemberState_RS_RECOVERING}. "
- "Current state: {replCoord_getMemberState}{causedBy_status}",
- "MemberState_MemberState_RS_RECOVERING"_attr =
- MemberState(MemberState::RS_RECOVERING),
- "replCoord_getMemberState"_attr = _replCoord->getMemberState(),
- "causedBy_status"_attr = causedBy(status));
+ "Failed to transition into {state}. "
+ "Current state: {currentState}{status}",
+ "state"_attr = MemberState(MemberState::RS_RECOVERING),
+ "currentState"_attr = _replCoord->getMemberState(),
+ "status"_attr = causedBy(status));
// Do not mark ourselves too stale on errors so we can try again next time.
return;
}
@@ -417,21 +417,19 @@ void BackgroundSync::_produce() {
numTimesChoseSameSyncSource.increment(1);
mongo::sleepmillis(sleepMS);
} else {
- LOGV2(
- 21088,
- "Changed sync source from {oldSource_empty_std_string_empty_oldSource} to {source}",
- "oldSource_empty_std_string_empty_oldSource"_attr =
- (oldSource.empty() ? std::string("empty") : oldSource.toString()),
- "source"_attr = source);
+ LOGV2(21088,
+ "Changed sync source from {oldSource} to {source}",
+ "oldSource"_attr =
+ (oldSource.empty() ? std::string("empty") : oldSource.toString()),
+ "source"_attr = source);
numTimesChoseDifferentSyncSource.increment(1);
}
} else {
if (!syncSourceResp.isOK()) {
LOGV2(21089,
"failed to find sync source, received error "
- "{syncSourceResp_syncSourceStatus_getStatus}",
- "syncSourceResp_syncSourceStatus_getStatus"_attr =
- syncSourceResp.syncSourceStatus.getStatus());
+ "{status}",
+ "status"_attr = syncSourceResp.syncSourceStatus.getStatus());
}
long long sleepMS = 1000;
@@ -523,10 +521,9 @@ void BackgroundSync::_produce() {
LOGV2_DEBUG(21092,
logSeverityV1toV2(logLevel).toInt(),
"scheduling fetcher to read remote oplog on {source} starting at "
- "{oplogFetcher_getLastOpTimeFetched_forTest}",
+ "{lastOpTimeFetched}",
"source"_attr = source,
- "oplogFetcher_getLastOpTimeFetched_forTest"_attr =
- oplogFetcher->getLastOpTimeFetched_forTest());
+ "lastOpTimeFetched"_attr = oplogFetcher->getLastOpTimeFetched_forTest());
auto scheduleStatus = oplogFetcher->startup();
if (!scheduleStatus.isOK()) {
LOGV2_WARNING(
@@ -672,8 +669,8 @@ void BackgroundSync::_runRollback(OperationContext* opCtx,
"Starting rollback due to {fetcherReturnStatus}",
"fetcherReturnStatus"_attr = redact(fetcherReturnStatus));
LOGV2(21099,
- "Replication commit point: {replCoord_getLastCommittedOpTime}",
- "replCoord_getLastCommittedOpTime"_attr = _replCoord->getLastCommittedOpTime());
+ "Replication commit point: {lastCommittedOpTime}",
+ "lastCommittedOpTime"_attr = _replCoord->getLastCommittedOpTime());
// TODO: change this to call into the Applier directly to block until the applier is
// drained.
@@ -890,10 +887,9 @@ OpTime BackgroundSync::_readLastAppliedOpTime(OperationContext* opCtx) {
throw;
} catch (const DBException& ex) {
LOGV2_FATAL(21129,
- "Problem reading {NamespaceString_kRsOplogNamespace_ns}: {ex}",
- "NamespaceString_kRsOplogNamespace_ns"_attr =
- NamespaceString::kRsOplogNamespace.ns(),
- "ex"_attr = redact(ex));
+ "Problem reading {namespace}: {exception}",
+ "namespace"_attr = NamespaceString::kRsOplogNamespace.ns(),
+ "exception"_attr = redact(ex));
fassertFailed(18904);
}
diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
index d6cbe4874f4..543d27c30cc 100644
--- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp
+++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp
@@ -212,7 +212,7 @@ Status CollectionBulkLoaderImpl::insertDocuments(const std::vector<BSONObj>::con
Status CollectionBulkLoaderImpl::commit() {
return _runTaskReleaseResourcesOnFailure([&] {
_stats.startBuildingIndexes = Date_t::now();
- LOGV2_DEBUG(21130, 2, "Creating indexes for ns: {nss_ns}", "nss_ns"_attr = _nss.ns());
+ LOGV2_DEBUG(21130, 2, "Creating indexes for ns: {ns}", "ns"_attr = _nss.ns());
UnreplicatedWritesBlock uwb(_opCtx.get());
// Commit before deleting dups, so the dups will be removed from secondary indexes when
@@ -311,8 +311,8 @@ Status CollectionBulkLoaderImpl::commit() {
_stats.endBuildingIndexes = Date_t::now();
LOGV2_DEBUG(21131,
2,
- "Done creating indexes for ns: {nss_ns}, stats: {stats}",
- "nss_ns"_attr = _nss.ns(),
+ "Done creating indexes for ns: {ns}, stats: {stats}",
+ "ns"_attr = _nss.ns(),
"stats"_attr = _stats.toString());
_releaseResources();
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index 26ff83f2e2c..458487ab754 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -157,11 +157,10 @@ BaseCloner::AfterStageBehavior CollectionCloner::listIndexesStage() {
? getClient()->getReadyIndexSpecs(_sourceDbAndUuid, QueryOption_SlaveOk)
: getClient()->getIndexSpecs(_sourceDbAndUuid, QueryOption_SlaveOk);
if (indexSpecs.empty()) {
- LOGV2_WARNING(
- 21143,
- "No indexes found for collection {sourceNss_ns} while cloning from {getSource}",
- "sourceNss_ns"_attr = _sourceNss.ns(),
- "getSource"_attr = getSource());
+ LOGV2_WARNING(21143,
+ "No indexes found for collection {collection} while cloning from {source}",
+ "collection"_attr = _sourceNss.ns(),
+ "source"_attr = getSource());
}
for (auto&& spec : indexSpecs) {
if (spec["name"].str() == "_id_"_sd) {
@@ -178,8 +177,8 @@ BaseCloner::AfterStageBehavior CollectionCloner::listIndexesStage() {
if (!_idIndexSpec.isEmpty() && _collectionOptions.autoIndexId == CollectionOptions::NO) {
LOGV2_WARNING(21144,
"Found the _id_ index spec but the collection specified autoIndexId of false "
- "on ns:{this_sourceNss}",
- "this_sourceNss"_attr = this->_sourceNss);
+ "on ns:{ns}",
+ "ns"_attr = this->_sourceNss);
}
return kContinueNormally;
}
@@ -330,8 +329,8 @@ void CollectionCloner::handleNextBatch(DBClientCursorBatchIterator& iter) {
!mustExit()) {
LOGV2(21137,
"initialSyncHangCollectionClonerAfterHandlingBatchResponse fail point "
- "enabled for {sourceNss}. Blocking until fail point is disabled.",
- "sourceNss"_attr = _sourceNss.toString());
+ "enabled for {namespace}. Blocking until fail point is disabled.",
+ "namespace"_attr = _sourceNss.toString());
mongo::sleepsecs(1);
}
},
@@ -350,8 +349,8 @@ void CollectionCloner::insertDocumentsCallback(const executor::TaskExecutor::Cal
std::vector<BSONObj> docs;
if (_documentsToInsert.size() == 0) {
LOGV2_WARNING(21145,
- "insertDocumentsCallback, but no documents to insert for ns:{sourceNss}",
- "sourceNss"_attr = _sourceNss);
+ "insertDocumentsCallback, but no documents to insert for ns:{ns}",
+ "ns"_attr = _sourceNss);
return;
}
_documentsToInsert.swap(docs);
diff --git a/src/mongo/db/repl/data_replicator_external_state_impl.cpp b/src/mongo/db/repl/data_replicator_external_state_impl.cpp
index 0cd255a64db..8114716c251 100644
--- a/src/mongo/db/repl/data_replicator_external_state_impl.cpp
+++ b/src/mongo/db/repl/data_replicator_external_state_impl.cpp
@@ -103,20 +103,20 @@ bool DataReplicatorExternalStateImpl::shouldStopFetching(
if (oqMetadata) {
LOGV2(21150,
"Canceling oplog query due to OplogQueryMetadata. We have to choose a new "
- "sync source. Current source: {source}, OpTime {oqMetadata_getLastOpApplied}, "
- "its sync source index:{oqMetadata_getSyncSourceIndex}",
+ "sync source. Current source: {source}, OpTime {lastAppliedOpTime}, "
+ "its sync source index:{syncSourceIndex}",
"source"_attr = source,
- "oqMetadata_getLastOpApplied"_attr = oqMetadata->getLastOpApplied(),
- "oqMetadata_getSyncSourceIndex"_attr = oqMetadata->getSyncSourceIndex());
+ "lastAppliedOpTime"_attr = oqMetadata->getLastOpApplied(),
+ "syncSourceIndex"_attr = oqMetadata->getSyncSourceIndex());
} else {
LOGV2(21151,
"Canceling oplog query due to ReplSetMetadata. We have to choose a new sync "
- "source. Current source: {source}, OpTime {replMetadata_getLastOpVisible}, its "
- "sync source index:{replMetadata_getSyncSourceIndex}",
+ "source. Current source: {source}, OpTime {lastVisibleOpTime}, its "
+ "sync source index:{syncSourceIndex}",
"source"_attr = source,
- "replMetadata_getLastOpVisible"_attr = replMetadata.getLastOpVisible(),
- "replMetadata_getSyncSourceIndex"_attr = replMetadata.getSyncSourceIndex());
+ "lastVisibleOpTime"_attr = replMetadata.getLastOpVisible(),
+ "syncSourceIndex"_attr = replMetadata.getSyncSourceIndex());
}
return true;
}
diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp
index 67ce43a577b..d6b62b50c50 100644
--- a/src/mongo/db/repl/database_cloner.cpp
+++ b/src/mongo/db/repl/database_cloner.cpp
@@ -90,8 +90,8 @@ BaseCloner::AfterStageBehavior DatabaseCloner::listCollectionsStage() {
if (collectionNamespace.isSystem() && !collectionNamespace.isLegalClientSystemNS()) {
LOGV2_DEBUG(21146,
1,
- "Skipping 'system' collection: {collectionNamespace_ns}",
- "collectionNamespace_ns"_attr = collectionNamespace.ns());
+ "Skipping 'system' collection: {collection}",
+ "collection"_attr = collectionNamespace.ns());
continue;
}
LOGV2_DEBUG(21147, 2, "Allowing cloning of collectionInfo: {info}", "info"_attr = info);
@@ -141,12 +141,12 @@ void DatabaseCloner::postStage() {
auto collStatus = _currentCollectionCloner->run();
if (collStatus.isOK()) {
LOGV2_DEBUG(
- 21148, 1, "collection clone finished: {sourceNss}", "sourceNss"_attr = sourceNss);
+ 21148, 1, "collection clone finished: {namespace}", "namespace"_attr = sourceNss);
} else {
LOGV2_ERROR(21149,
- "collection clone for '{sourceNss}' failed due to {collStatus}",
- "sourceNss"_attr = sourceNss,
- "collStatus"_attr = collStatus.toString());
+ "collection clone for '{namespace}' failed due to {status}",
+ "namespace"_attr = sourceNss,
+ "status"_attr = collStatus.toString());
setInitialSyncFailedStatus(
{ErrorCodes::InitialSyncFailure,
collStatus
diff --git a/src/mongo/db/repl/drop_pending_collection_reaper.cpp b/src/mongo/db/repl/drop_pending_collection_reaper.cpp
index ea837191ef5..d48b63bb318 100644
--- a/src/mongo/db/repl/drop_pending_collection_reaper.cpp
+++ b/src/mongo/db/repl/drop_pending_collection_reaper.cpp
@@ -151,10 +151,10 @@ bool DropPendingCollectionReaper::rollBackDropPendingCollection(
}
LOGV2(21152,
- "Rolling back collection drop for {pendingNss} with drop OpTime {opTime} to namespace "
+ "Rolling back collection drop for {pendingNs} with drop OpTime {dropOpTime} to namespace "
"{collectionNamespace}",
- "pendingNss"_attr = pendingNss,
- "opTime"_attr = opTime,
+ "pendingNs"_attr = pendingNss,
+ "dropOpTime"_attr = opTime,
"collectionNamespace"_attr = collectionNamespace);
return true;
@@ -185,9 +185,9 @@ void DropPendingCollectionReaper::dropCollectionsOlderThan(OperationContext* opC
const auto& dropOpTime = opTimeAndNamespace.first;
const auto& nss = opTimeAndNamespace.second;
LOGV2(21153,
- "Completing collection drop for {nss} with drop optime {dropOpTime} "
+ "Completing collection drop for {ns} with drop optime {dropOpTime} "
"(notification optime: {opTime})",
- "nss"_attr = nss,
+ "ns"_attr = nss,
"dropOpTime"_attr = dropOpTime,
"opTime"_attr = opTime);
Status status = Status::OK();
@@ -199,9 +199,9 @@ void DropPendingCollectionReaper::dropCollectionsOlderThan(OperationContext* opC
}
if (!status.isOK()) {
LOGV2_WARNING(21155,
- "Failed to remove drop-pending collection {nss} with drop optime "
+ "Failed to remove drop-pending collection {ns} with drop optime "
"{dropOpTime} (notification optime: {opTime}): {status}",
- "nss"_attr = nss,
+ "ns"_attr = nss,
"dropOpTime"_attr = dropOpTime,
"opTime"_attr = opTime,
"status"_attr = status);
diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp
index ce74b1ae19a..575dfaee65d 100644
--- a/src/mongo/db/repl/initial_syncer.cpp
+++ b/src/mongo/db/repl/initial_syncer.cpp
@@ -733,9 +733,9 @@ Status InitialSyncer::_truncateOplogAndDropReplicatedDatabases() {
// truncate oplog; drop user databases.
LOGV2_DEBUG(21171,
1,
- "About to truncate the oplog, if it exists, ns:{opts_localOplogNS}, and drop all "
+ "About to truncate the oplog, if it exists, ns:{ns}, and drop all "
"user databases (so that we can clone them).",
- "opts_localOplogNS"_attr = _opts.localOplogNS);
+ "ns"_attr = _opts.localOplogNS);
auto opCtx = makeOpCtx();
@@ -743,21 +743,15 @@ Status InitialSyncer::_truncateOplogAndDropReplicatedDatabases() {
UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx.get());
// 1.) Truncate the oplog.
- LOGV2_DEBUG(21172,
- 2,
- "Truncating the existing oplog: {opts_localOplogNS}",
- "opts_localOplogNS"_attr = _opts.localOplogNS);
+ LOGV2_DEBUG(21172, 2, "Truncating the existing oplog: {ns}", "ns"_attr = _opts.localOplogNS);
Timer timer;
auto status = _storage->truncateCollection(opCtx.get(), _opts.localOplogNS);
LOGV2(21173,
- "Initial syncer oplog truncation finished in: {timer_millis}ms",
- "timer_millis"_attr = timer.millis());
+ "Initial syncer oplog truncation finished in: {truncationDuration}ms",
+ "truncationDuration"_attr = timer.millis());
if (!status.isOK()) {
// 1a.) Create the oplog.
- LOGV2_DEBUG(21174,
- 2,
- "Creating the oplog: {opts_localOplogNS}",
- "opts_localOplogNS"_attr = _opts.localOplogNS);
+ LOGV2_DEBUG(21174, 2, "Creating the oplog: {ns}", "ns"_attr = _opts.localOplogNS);
status = _storage->createOplog(opCtx.get(), _opts.localOplogNS);
if (!status.isOK()) {
return status;
@@ -1092,16 +1086,15 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
<< _initialSyncState->beginFetchingTimestamp.toBSON());
invariant(!result.getValue().documents.empty());
- LOGV2_DEBUG(
- 21177,
- 2,
- "Setting begin applying timestamp to {initialSyncState_beginApplyingTimestamp} using last "
- "oplog entry: {result_getValue_documents_front}, ns: {opts_localOplogNS} and the begin "
- "fetching timestamp to {initialSyncState_beginFetchingTimestamp}",
- "initialSyncState_beginApplyingTimestamp"_attr = _initialSyncState->beginApplyingTimestamp,
- "result_getValue_documents_front"_attr = redact(result.getValue().documents.front()),
- "opts_localOplogNS"_attr = _opts.localOplogNS,
- "initialSyncState_beginFetchingTimestamp"_attr = _initialSyncState->beginFetchingTimestamp);
+ LOGV2_DEBUG(21177,
+ 2,
+ "Setting begin applying timestamp to {beginApplyingTimestamp} using last "
+ "oplog entry: {lastOplogEntry}, ns: {ns} and the begin "
+ "fetching timestamp to {beginFetchingTimestamp}",
+ "beginApplyingTimestamp"_attr = _initialSyncState->beginApplyingTimestamp,
+ "lastOplogEntry"_attr = redact(result.getValue().documents.front()),
+ "ns"_attr = _opts.localOplogNS,
+ "beginFetchingTimestamp"_attr = _initialSyncState->beginFetchingTimestamp);
const auto configResult = _dataReplicatorExternalState->getCurrentConfig();
status = configResult.getStatus();
@@ -1161,9 +1154,8 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith<Fetcher::QueryResponse>
LOGV2_DEBUG(21180,
2,
- "Starting AllDatabaseCloner: {initialSyncState_allDatabaseCloner}",
- "initialSyncState_allDatabaseCloner"_attr =
- _initialSyncState->allDatabaseCloner->toString());
+ "Starting AllDatabaseCloner: {allDatabaseCloner}",
+ "allDatabaseCloner"_attr = _initialSyncState->allDatabaseCloner->toString());
auto [startClonerFuture, startCloner] =
_initialSyncState->allDatabaseCloner->runOnExecutorEvent(_clonerExec);
@@ -1384,8 +1376,8 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp(
stdx::lock_guard<Latch> lock(_mutex);
_lastApplied = resultOpTimeAndWallTime;
LOGV2(21186,
- "No need to apply operations. (currently at {initialSyncState_stopTimestamp})",
- "initialSyncState_stopTimestamp"_attr = _initialSyncState->stopTimestamp.toBSON());
+ "No need to apply operations. (currently at {stopTimestamp})",
+ "stopTimestamp"_attr = _initialSyncState->stopTimestamp.toBSON());
// This sets the error in 'onCompletionGuard' and shuts down the OplogFetcher on error.
_scheduleRollbackCheckerCheckForRollback_inlock(lock, onCompletionGuard);
@@ -1405,8 +1397,8 @@ void InitialSyncer::_getNextApplierBatchCallback(
auto batchResult = _getNextApplierBatch_inlock();
if (!batchResult.isOK()) {
LOGV2_WARNING(21196,
- "Failure creating next apply batch: {batchResult_getStatus}",
- "batchResult_getStatus"_attr = redact(batchResult.getStatus()));
+ "Failure creating next apply batch: {status}",
+ "status"_attr = redact(batchResult.getStatus()));
onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, batchResult.getStatus());
return;
}
@@ -1579,8 +1571,8 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
if (!scheduleResult.isOK()) {
LOGV2_WARNING(21197,
"Unable to schedule initial syncer completion task due to "
- "{scheduleResult_getStatus}. Running callback on current thread.",
- "scheduleResult_getStatus"_attr = redact(scheduleResult.getStatus()));
+ "{status}. Running callback on current thread.",
+ "status"_attr = redact(scheduleResult.getStatus()));
_finishCallback(result);
}
});
@@ -1589,8 +1581,8 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
stdx::lock_guard<Latch> lock(_mutex);
LOGV2(21192,
- "Initial Sync Attempt Statistics: {getInitialSyncProgress_inlock}",
- "getInitialSyncProgress_inlock"_attr = redact(_getInitialSyncProgress_inlock()));
+ "Initial Sync Attempt Statistics: {statistics}",
+ "statistics"_attr = redact(_getInitialSyncProgress_inlock()));
auto runTime = _initialSyncState ? _initialSyncState->timer.millis() : 0;
int rollBackId = -1;
@@ -1633,11 +1625,11 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWith<OpTimeAndWallTime
LOGV2_ERROR(21200,
"Initial sync attempt failed -- attempts left: "
- "{stats_maxFailedInitialSyncAttempts_stats_failedInitialSyncAttempts} cause: "
- "{result_getStatus}",
- "stats_maxFailedInitialSyncAttempts_stats_failedInitialSyncAttempts"_attr =
+ "{attemptsLeft} cause: "
+ "{status}",
+ "attemptsLeft"_attr =
(_stats.maxFailedInitialSyncAttempts - _stats.failedInitialSyncAttempts),
- "result_getStatus"_attr = redact(result.getStatus()));
+ "status"_attr = redact(result.getStatus()));
// Check if need to do more retries.
if (_stats.failedInitialSyncAttempts >= _stats.maxFailedInitialSyncAttempts) {
@@ -1704,8 +1696,8 @@ void InitialSyncer::_finishCallback(StatusWith<OpTimeAndWallTime> lastApplied) {
onCompletion(lastApplied);
} catch (...) {
LOGV2_WARNING(21198,
- "initial syncer finish callback threw exception: {exceptionToStatus}",
- "exceptionToStatus"_attr = redact(exceptionToStatus()));
+ "initial syncer finish callback threw exception: {exception}",
+ "exception"_attr = redact(exceptionToStatus()));
}
// Destroy the remaining reference to the completion callback before we transition the state to
@@ -1781,15 +1773,13 @@ void InitialSyncer::_checkApplierProgressAndScheduleGetNextApplierBatch_inlock(
// Check if any ops occurred while cloning or any ops need to be fetched.
invariant(_initialSyncState->beginFetchingTimestamp < _initialSyncState->stopTimestamp);
LOGV2(21195,
- "Writing to the oplog and applying operations until {initialSyncState_stopTimestamp} "
+ "Writing to the oplog and applying operations until {stopTimestamp} "
"before initial sync can complete. (started fetching at "
- "{initialSyncState_beginFetchingTimestamp} and applying at "
- "{initialSyncState_beginApplyingTimestamp})",
- "initialSyncState_stopTimestamp"_attr = _initialSyncState->stopTimestamp.toBSON(),
- "initialSyncState_beginFetchingTimestamp"_attr =
- _initialSyncState->beginFetchingTimestamp.toBSON(),
- "initialSyncState_beginApplyingTimestamp"_attr =
- _initialSyncState->beginApplyingTimestamp.toBSON());
+ "{beginFetchingTimestamp} and applying at "
+ "{beginApplyingTimestamp})",
+ "stopTimestamp"_attr = _initialSyncState->stopTimestamp.toBSON(),
+ "beginFetchingTimestamp"_attr = _initialSyncState->beginFetchingTimestamp.toBSON(),
+ "beginApplyingTimestamp"_attr = _initialSyncState->beginApplyingTimestamp.toBSON());
// Fall through to scheduling _getNextApplierBatchCallback().
} else if (_lastApplied.opTime.getTimestamp() >= _initialSyncState->stopTimestamp) {
// Check for rollback if we have applied far enough to be consistent.
diff --git a/src/mongo/db/repl/isself.cpp b/src/mongo/db/repl/isself.cpp
index b4af71d7fdb..3e9c6ca26a7 100644
--- a/src/mongo/db/repl/isself.cpp
+++ b/src/mongo/db/repl/isself.cpp
@@ -121,9 +121,9 @@ std::vector<std::string> getAddrsForHost(const std::string& iporhost,
if (err) {
LOGV2_WARNING(21207,
- "getaddrinfo(\"{iporhost}\") failed: {stringifyError_err}",
- "iporhost"_attr = iporhost,
- "stringifyError_err"_attr = stringifyError(err));
+ "getaddrinfo(\"{host}\") failed: {err}",
+ "host"_attr = iporhost,
+ "err"_attr = stringifyError(err));
return out;
}
@@ -137,9 +137,8 @@ std::vector<std::string> getAddrsForHost(const std::string& iporhost,
err = getnameinfo(
addr->ai_addr, addr->ai_addrlen, host, NI_MAXHOST, nullptr, 0, NI_NUMERICHOST);
if (err) {
- LOGV2_WARNING(21208,
- "getnameinfo() failed: {stringifyError_err}",
- "stringifyError_err"_attr = stringifyError(err));
+ LOGV2_WARNING(
+ 21208, "getnameinfo() failed: {err}", "err"_attr = stringifyError(err));
continue;
}
out.push_back(host);
@@ -152,7 +151,7 @@ std::vector<std::string> getAddrsForHost(const std::string& iporhost,
for (std::vector<std::string>::const_iterator o = out.begin(); o != out.end(); ++o) {
builder << " [ " << *o << "]";
}
- LOGV2_DEBUG(21205, 2, "{builder_str}", "builder_str"_attr = builder.str());
+ LOGV2_DEBUG(21205, 2, "{msg}", "msg"_attr = builder.str());
}
return out;
@@ -216,9 +215,9 @@ bool isSelf(const HostAndPort& hostAndPort, ServiceContext* const ctx) {
return me;
} catch (const std::exception& e) {
LOGV2_WARNING(21209,
- "couldn't check isSelf ({hostAndPort}) {e_what}",
+ "couldn't check isSelf ({hostAndPort}) {exception}",
"hostAndPort"_attr = hostAndPort,
- "e_what"_attr = e.what());
+ "exception"_attr = e.what());
}
return false;
@@ -237,9 +236,7 @@ std::vector<std::string> getBoundAddrs(const bool ipv6enabled) {
int err = getifaddrs(&addrs);
if (err) {
- LOGV2_WARNING(21210,
- "getifaddrs failure: {errnoWithDescription_err}",
- "errnoWithDescription_err"_attr = errnoWithDescription(err));
+ LOGV2_WARNING(21210, "getifaddrs failure: {err}", "err"_attr = errnoWithDescription(err));
return out;
}
ON_BLOCK_EXIT([&] { freeifaddrs(addrs); });
@@ -261,9 +258,7 @@ std::vector<std::string> getBoundAddrs(const bool ipv6enabled) {
0,
NI_NUMERICHOST);
if (err) {
- LOGV2_WARNING(21211,
- "getnameinfo() failed: {gai_strerror_err}",
- "gai_strerror_err"_attr = gai_strerror(err));
+ LOGV2_WARNING(21211, "getnameinfo() failed: {err}", "err"_attr = gai_strerror(err));
continue;
}
out.push_back(host);
@@ -300,9 +295,8 @@ std::vector<std::string> getBoundAddrs(const bool ipv6enabled) {
}
if (err != NO_ERROR) {
- LOGV2_WARNING(21212,
- "GetAdaptersAddresses() failed: {errnoWithDescription_err}",
- "errnoWithDescription_err"_attr = errnoWithDescription(err));
+ LOGV2_WARNING(
+ 21212, "GetAdaptersAddresses() failed: {err}", "err"_attr = errnoWithDescription(err));
return out;
}
@@ -321,8 +315,8 @@ std::vector<std::string> getBoundAddrs(const bool ipv6enabled) {
AF_INET, &(sock->sin_addr), addrstr, INET_ADDRSTRLEN, 0, ec);
if (ec) {
LOGV2_WARNING(21213,
- "inet_ntop failed during IPv4 address conversion: {ec_message}",
- "ec_message"_attr = ec.message());
+ "inet_ntop failed during IPv4 address conversion: {message}",
+ "message"_attr = ec.message());
continue;
}
out.push_back(addrstr);
@@ -335,8 +329,8 @@ std::vector<std::string> getBoundAddrs(const bool ipv6enabled) {
AF_INET6, &(sock->sin6_addr), addrstr, INET6_ADDRSTRLEN, 0, ec);
if (ec) {
LOGV2_WARNING(21214,
- "inet_ntop failed during IPv6 address conversion: {ec_message}",
- "ec_message"_attr = ec.message());
+ "inet_ntop failed during IPv6 address conversion: {message}",
+ "message"_attr = ec.message());
continue;
}
out.push_back(addrstr);
@@ -352,7 +346,7 @@ std::vector<std::string> getBoundAddrs(const bool ipv6enabled) {
for (std::vector<std::string>::const_iterator o = out.begin(); o != out.end(); ++o) {
builder << " [ " << *o << "]";
}
- LOGV2_DEBUG(21206, 2, "{builder_str}", "builder_str"_attr = builder.str());
+ LOGV2_DEBUG(21206, 2, "{msg}", "msg"_attr = builder.str());
}
return out;
}
diff --git a/src/mongo/db/repl/member_data.cpp b/src/mongo/db/repl/member_data.cpp
index 044f6dd1814..61a717f72e4 100644
--- a/src/mongo/db/repl/member_data.cpp
+++ b/src/mongo/db/repl/member_data.cpp
@@ -71,9 +71,9 @@ bool MemberData::setUpValues(Date_t now, ReplSetHeartbeatResponse&& hbResponse)
if (_lastResponse.getState() != hbResponse.getState()) {
LOGV2_OPTIONS(21215,
{logv2::LogTag::kRS},
- "Member {hostAndPort} is now in state {hbResponse_getState}",
+ "Member {hostAndPort} is now in state {state}",
"hostAndPort"_attr = _hostAndPort.toString(),
- "hbResponse_getState"_attr = hbResponse.getState().toString());
+ "state"_attr = hbResponse.getState().toString());
}
bool opTimeAdvanced =
@@ -158,11 +158,11 @@ void MemberData::setLastDurableOpTimeAndWallTime(OpTimeAndWallTime opTime, Date_
// TODO(russotto): We think this should never happen, rollback or no rollback. Make this an
// invariant and see what happens.
LOGV2(21218,
- "Durable progress ({opTime_opTime}) is ahead of the applied progress "
+ "Durable progress ({durableOpTime}) is ahead of the applied progress "
"({lastAppliedOpTime}. This is likely due to a "
"rollback. memberid: {memberId}{hostAndPort} previous durable progress: "
"{lastDurableOpTime}",
- "opTime_opTime"_attr = opTime.opTime,
+ "durableOpTime"_attr = opTime.opTime,
"lastAppliedOpTime"_attr = _lastAppliedOpTime,
"memberId"_attr = _memberId,
"hostAndPort"_attr = _hostAndPort.toString(),
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 7420fd5ae27..33ba5e3d75d 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -423,12 +423,12 @@ std::vector<OpTime> logInsertOps(OperationContext* opCtx,
sleepBetweenInsertOpTimeGenerationAndLogOp.execute([&](const BSONObj& data) {
auto numMillis = data["waitForMillis"].numberInt();
LOGV2(21244,
- "Sleeping for {numMillis}ms after receiving {count} optimes from {opTimes_front} to "
- "{opTimes_back}",
+ "Sleeping for {numMillis}ms after receiving {count} optimes from {first} to "
+ "{last}",
"numMillis"_attr = numMillis,
"count"_attr = count,
- "opTimes_front"_attr = opTimes.front(),
- "opTimes_back"_attr = opTimes.back());
+ "first"_attr = opTimes.front(),
+ "last"_attr = opTimes.back());
sleepmillis(numMillis);
});
@@ -549,7 +549,7 @@ void createOplog(OperationContext* opCtx,
stringstream ss;
ss << "cmdline oplogsize (" << n << ") different than existing (" << o
<< ") see: http://dochub.mongodb.org/core/increase-oplog";
- LOGV2(21249, "{ss_str}", "ss_str"_attr = ss.str());
+ LOGV2(21249, "{msg}", "msg"_attr = ss.str());
uasserted(13257, ss.str());
}
}
@@ -564,8 +564,8 @@ void createOplog(OperationContext* opCtx,
LOGV2(21250, "******");
LOGV2(21251,
- "creating replication oplog of size: {int_sz_1024_1024}MB...",
- "int_sz_1024_1024"_attr = (int)(sz / (1024 * 1024)));
+ "creating replication oplog of size: {size}MB...",
+ "size"_attr = (int)(sz / (1024 * 1024)));
CollectionOptions options;
options.capped = true;
@@ -804,9 +804,9 @@ const StringMap<ApplyOpMetadata> kOpsMap = {
auto nss = extractNsFromUUIDorNs(opCtx, entry.getNss(), entry.getUuid(), cmd);
if (nss.isDropPendingNamespace()) {
LOGV2(21253,
- "applyCommand: {nss} : collection is already in a drop-pending state: ignoring "
+ "applyCommand: {ns} : collection is already in a drop-pending state: ignoring "
"collection drop: {cmd}",
- "nss"_attr = nss,
+ "ns"_attr = nss,
"cmd"_attr = redact(cmd));
return Status::OK();
}
@@ -945,10 +945,10 @@ Status applyOperation_inlock(OperationContext* opCtx,
auto op = opOrGroupedInserts.getOp();
LOGV2_DEBUG(21254,
3,
- "applying op (or grouped inserts): {opOrGroupedInserts}, oplog application mode: "
- "{OplogApplication_modeToString_mode}",
- "opOrGroupedInserts"_attr = redact(opOrGroupedInserts.toBSON()),
- "OplogApplication_modeToString_mode"_attr = OplogApplication::modeToString(mode));
+ "applying op (or grouped inserts): {op}, oplog application mode: "
+ "{mode}",
+ "op"_attr = redact(opOrGroupedInserts.toBSON()),
+ "mode"_attr = OplogApplication::modeToString(mode));
// Choose opCounters based on running on standalone/primary or secondary by checking
// whether writes are replicated. Atomic applyOps command is an exception, which runs
@@ -1355,9 +1355,9 @@ Status applyCommand_inlock(OperationContext* opCtx,
LOGV2_DEBUG(21255,
3,
"applying command op: {entry}, oplog application mode: "
- "{OplogApplication_modeToString_mode}",
+ "{mode}",
"entry"_attr = redact(entry.toBSON()),
- "OplogApplication_modeToString_mode"_attr = OplogApplication::modeToString(mode));
+ "mode"_attr = OplogApplication::modeToString(mode));
// Only commands are processed here.
invariant(entry.getOpType() == OpTypeEnum::kCommand);
@@ -1525,10 +1525,10 @@ Status applyCommand_inlock(OperationContext* opCtx,
default: {
if (!curOpToApply.acceptableErrors.count(status.code())) {
LOGV2_ERROR(21262,
- "Failed command {o} on {nss_db} with status {status} during oplog "
+ "Failed command {o} on {db} with status {status} during oplog "
"application",
"o"_attr = redact(o),
- "nss_db"_attr = nss.db(),
+ "db"_attr = nss.db(),
"status"_attr = status);
return status;
}
diff --git a/src/mongo/db/repl/oplog_applier.cpp b/src/mongo/db/repl/oplog_applier.cpp
index 2e01bc28acb..494a5c9b0f1 100644
--- a/src/mongo/db/repl/oplog_applier.cpp
+++ b/src/mongo/db/repl/oplog_applier.cpp
@@ -110,10 +110,8 @@ void OplogApplier::enqueue(OperationContext* opCtx,
OplogBuffer::Batch::const_iterator end) {
static Occasionally sampler;
if (sampler.tick()) {
- LOGV2_DEBUG(21226,
- 2,
- "oplog buffer has {oplogBuffer_getSize} bytes",
- "oplogBuffer_getSize"_attr = _oplogBuffer->getSize());
+ LOGV2_DEBUG(
+ 21226, 2, "oplog buffer has {size} bytes", "size"_attr = _oplogBuffer->getSize());
}
_oplogBuffer->push(opCtx, begin, end);
}
diff --git a/src/mongo/db/repl/oplog_applier_impl.cpp b/src/mongo/db/repl/oplog_applier_impl.cpp
index e196704966f..35557b1309d 100644
--- a/src/mongo/db/repl/oplog_applier_impl.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl.cpp
@@ -144,7 +144,7 @@ Status finishAndLogApply(OperationContext* opCtx,
s << redact(entryOrGroupedInserts.toBSON());
s << ", took " << opDuration << "ms";
- LOGV2(21228, "{s_str}", "s_str"_attr = s.str());
+ LOGV2(21228, "{msg}", "msg"_attr = s.str());
}
}
}
@@ -623,7 +623,7 @@ StatusWith<OpTime> OplogApplierImpl::_applyOplogBatch(OperationContext* opCtx,
std::vector<OplogEntry> ops) {
invariant(!ops.empty());
- LOGV2_DEBUG(21230, 2, "replication batch size is {ops_size}", "ops_size"_attr = ops.size());
+ LOGV2_DEBUG(21230, 2, "replication batch size is {size}", "size"_attr = ops.size());
// Stop all readers until we're done. This also prevents doc-locking engines from deleting old
// entries from the oplog until we finish writing.
@@ -723,14 +723,13 @@ StatusWith<OpTime> OplogApplierImpl::_applyOplogBatch(OperationContext* opCtx,
if (!status.isOK()) {
LOGV2_FATAL(21235,
"Failed to apply batch of operations. Number of operations in "
- "batch: {ops_size}. First operation: {ops_front}. Last operation: "
- "{ops_back}. Oplog application failed in writer thread "
- "{std_distance_statusVector_cbegin_it}: {status}",
- "ops_size"_attr = ops.size(),
- "ops_front"_attr = redact(ops.front().toBSON()),
- "ops_back"_attr = redact(ops.back().toBSON()),
- "std_distance_statusVector_cbegin_it"_attr =
- std::distance(statusVector.cbegin(), it),
+ "batch: {size}. First operation: {first}. Last operation: "
+ "{last}. Oplog application failed in writer thread "
+ "{thread}: {status}",
+ "size"_attr = ops.size(),
+ "first"_attr = redact(ops.front().toBSON()),
+ "last"_attr = redact(ops.back().toBSON()),
+ "thread"_attr = std::distance(statusVector.cbegin(), it),
"status"_attr = redact(status));
return status;
}
@@ -1076,9 +1075,9 @@ Status OplogApplierImpl::applyOplogBatchPerWorker(OperationContext* opCtx,
}
LOGV2_FATAL(21237,
- "Error applying operation ({entry}): {causedBy_status}",
+ "Error applying operation ({entry}): {status}",
"entry"_attr = redact(entry.toBSON()),
- "causedBy_status"_attr = causedBy(redact(status)));
+ "status"_attr = causedBy(redact(status)));
return status;
}
} catch (const DBException& e) {
diff --git a/src/mongo/db/repl/oplog_batcher.cpp b/src/mongo/db/repl/oplog_batcher.cpp
index a36c8b77b4d..21d154f2e39 100644
--- a/src/mongo/db/repl/oplog_batcher.cpp
+++ b/src/mongo/db/repl/oplog_batcher.cpp
@@ -320,8 +320,8 @@ void OplogBatcher::_run(StorageInterface* storageInterface) {
if (isDraining && _oplogBuffer->isEmpty()) {
ops.setTermWhenExhausted(termWhenBufferIsEmpty);
LOGV2(21239,
- "Oplog buffer has been drained in term {termWhenBufferIsEmpty}",
- "termWhenBufferIsEmpty"_attr = termWhenBufferIsEmpty);
+ "Oplog buffer has been drained in term {term}",
+ "term"_attr = termWhenBufferIsEmpty);
} else {
// Don't emit empty batches.
continue;
diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp
index 09c72ceef4b..ffa9e7a1e70 100644
--- a/src/mongo/db/repl/oplog_fetcher.cpp
+++ b/src/mongo/db/repl/oplog_fetcher.cpp
@@ -734,7 +734,7 @@ Status OplogFetcher::_onSuccessfulBatch(const Documents& documents) {
[&](auto&&) {
status = {ErrorCodes::FailPointEnabled,
"stopReplProducerOnDocument fail point is enabled."};
- LOGV2(21269, "{status_reason}", "status_reason"_attr = status.reason());
+ LOGV2(21269, "{status}", "status"_attr = status.reason());
},
[&](const BSONObj& data) {
auto opCtx = cc().makeOperationContext();
@@ -758,11 +758,11 @@ Status OplogFetcher::_onSuccessfulBatch(const Documents& documents) {
if (!documents.empty()) {
LOGV2_DEBUG(21270,
2,
- "oplog fetcher read {documents_size} operations from remote oplog starting at "
- "{documents_front_ts} and ending at {documents_back_ts}",
- "documents_size"_attr = documents.size(),
- "documents_front_ts"_attr = documents.front()["ts"],
- "documents_back_ts"_attr = documents.back()["ts"]);
+ "oplog fetcher read {operations} operations from remote oplog starting at "
+ "{first} and ending at {last}",
+ "operations"_attr = documents.size(),
+ "first"_attr = documents.front()["ts"],
+ "last"_attr = documents.back()["ts"]);
} else {
LOGV2_DEBUG(21271, 2, "oplog fetcher read 0 operations from remote oplog");
}
@@ -771,10 +771,10 @@ Status OplogFetcher::_onSuccessfulBatch(const Documents& documents) {
if (!oqMetadataResult.isOK()) {
LOGV2_ERROR(21278,
"invalid oplog query metadata from sync source {source}: "
- "{oqMetadataResult_getStatus}: {metadataObj}",
+ "{status}: {metadata}",
"source"_attr = _source,
- "oqMetadataResult_getStatus"_attr = oqMetadataResult.getStatus(),
- "metadataObj"_attr = _metadataObj);
+ "status"_attr = oqMetadataResult.getStatus(),
+ "metadata"_attr = _metadataObj);
return oqMetadataResult.getStatus();
}
auto oqMetadata = oqMetadataResult.getValue();
@@ -832,10 +832,10 @@ Status OplogFetcher::_onSuccessfulBatch(const Documents& documents) {
if (!metadataResult.isOK()) {
LOGV2_ERROR(21279,
"invalid replication metadata from sync source {source}: "
- "{metadataResult_getStatus}: {metadataObj}",
+ "{status}: {metadata}",
"source"_attr = _source,
- "metadataResult_getStatus"_attr = metadataResult.getStatus(),
- "metadataObj"_attr = _metadataObj);
+ "status"_attr = metadataResult.getStatus(),
+ "metadata"_attr = _metadataObj);
return metadataResult.getStatus();
}
replSetMetadata = metadataResult.getValue();
@@ -911,10 +911,10 @@ bool OplogFetcher::OplogFetcherRestartDecisionDefault::shouldContinue(OplogFetch
}
LOGV2(21275,
"Recreating cursor for oplog fetcher due to error: {status}. Last fetched optime: "
- "{fetcher_getLastOpTimeFetched}. Attempts remaining: {maxRestarts_numRestarts}",
+ "{lastOpTimeFetched}. Attempts remaining: {attemptsRemaining}",
"status"_attr = redact(status),
- "fetcher_getLastOpTimeFetched"_attr = fetcher->_getLastOpTimeFetched(),
- "maxRestarts_numRestarts"_attr = (_maxRestarts - _numRestarts));
+ "lastOpTimeFetched"_attr = fetcher->_getLastOpTimeFetched(),
+ "attemptsRemaining"_attr = (_maxRestarts - _numRestarts));
_numRestarts++;
return true;
}
diff --git a/src/mongo/db/repl/repl_client_info.cpp b/src/mongo/db/repl/repl_client_info.cpp
index d7c222c9b12..e3e5013c0ac 100644
--- a/src/mongo/db/repl/repl_client_info.cpp
+++ b/src/mongo/db/repl/repl_client_info.cpp
@@ -127,8 +127,8 @@ void ReplClientInfo::setLastOpToSystemLastOpTimeIgnoringInterrupt(OperationConte
// OperationContext to wait for writeConcern anyways.
LOGV2_DEBUG(21281,
2,
- "Ignoring set last op interruption error: {e_toStatus}",
- "e_toStatus"_attr = e.toStatus());
+ "Ignoring set last op interruption error: {exception}",
+ "exception"_attr = e.toStatus());
}
}
diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp
index 8203c36f567..de939cc087a 100644
--- a/src/mongo/db/repl/repl_set_commands.cpp
+++ b/src/mongo/db/repl/repl_set_commands.cpp
@@ -739,9 +739,7 @@ public:
status = ReplicationCoordinator::get(opCtx)->stepUpIfEligible(skipDryRun);
if (!status.isOK()) {
- LOGV2(21582,
- "replSetStepUp request failed{causedBy_status}",
- "causedBy_status"_attr = causedBy(status));
+ LOGV2(21582, "replSetStepUp request failed{status}", "status"_attr = causedBy(status));
}
uassertStatusOK(status);
@@ -777,8 +775,8 @@ public:
kFailedWithReplSetAbortPrimaryCatchUpCmd);
if (!status.isOK()) {
LOGV2(21584,
- "replSetAbortPrimaryCatchUp request failed{causedBy_status}",
- "causedBy_status"_attr = causedBy(status));
+ "replSetAbortPrimaryCatchUp request failed{status}",
+ "status"_attr = causedBy(status));
}
uassertStatusOK(status);
return true;
diff --git a/src/mongo/db/repl/replication_consistency_markers_impl.cpp b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
index c0a4cbba3e9..ca9f654d934 100644
--- a/src/mongo/db/repl/replication_consistency_markers_impl.cpp
+++ b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
@@ -126,8 +126,7 @@ bool ReplicationConsistencyMarkersImpl::getInitialSyncFlag(OperationContext* opC
return false;
}
- LOGV2_DEBUG(
- 21285, 3, "returning initial sync flag value of {flag_get}", "flag_get"_attr = flag.get());
+ LOGV2_DEBUG(21285, 3, "returning initial sync flag value of {flag}", "flag"_attr = flag.get());
return flag.get();
}
@@ -189,9 +188,9 @@ OpTime ReplicationConsistencyMarkersImpl::getMinValid(OperationContext* opCtx) c
LOGV2_DEBUG(21288,
3,
- "returning minvalid: {minValid}({minValid2})",
- "minValid"_attr = minValid.toString(),
- "minValid2"_attr = minValid.toBSON());
+ "returning minvalid: {minValidString}({minValidBSON})",
+ "minValidString"_attr = minValid.toString(),
+ "minValidBSON"_attr = minValid.toBSON());
return minValid;
}
@@ -200,9 +199,9 @@ void ReplicationConsistencyMarkersImpl::setMinValid(OperationContext* opCtx,
const OpTime& minValid) {
LOGV2_DEBUG(21289,
3,
- "setting minvalid to exactly: {minValid}({minValid2})",
- "minValid"_attr = minValid.toString(),
- "minValid2"_attr = minValid.toBSON());
+ "setting minvalid to exactly: {minValidString}({minValidBSON})",
+ "minValidString"_attr = minValid.toString(),
+ "minValidBSON"_attr = minValid.toBSON());
TimestampedBSONObj update;
update.obj =
BSON("$set" << BSON(MinValidDocument::kMinValidTimestampFieldName
@@ -221,9 +220,9 @@ void ReplicationConsistencyMarkersImpl::setMinValidToAtLeast(OperationContext* o
const OpTime& minValid) {
LOGV2_DEBUG(21290,
3,
- "setting minvalid to at least: {minValid}({minValid2})",
- "minValid"_attr = minValid.toString(),
- "minValid2"_attr = minValid.toBSON());
+ "setting minvalid to at least: {minValidString}({minValidBSON})",
+ "minValidString"_attr = minValid.toString(),
+ "minValidBSON"_attr = minValid.toBSON());
auto& termField = MinValidDocument::kMinValidTermFieldName;
auto& tsField = MinValidDocument::kMinValidTimestampFieldName;
@@ -265,9 +264,9 @@ void ReplicationConsistencyMarkersImpl::setAppliedThrough(OperationContext* opCt
invariant(!optime.isNull());
LOGV2_DEBUG(21291,
3,
- "setting appliedThrough to: {optime}({optime2})",
- "optime"_attr = optime.toString(),
- "optime2"_attr = optime.toBSON());
+ "setting appliedThrough to: {optimeString}({optimeBSON})",
+ "optimeString"_attr = optime.toString(),
+ "optimeBSON"_attr = optime.toBSON());
// We set the 'appliedThrough' to the provided timestamp. The 'appliedThrough' is only valid
// in checkpoints that contain all writes through this timestamp since it indicates the top of
@@ -307,9 +306,9 @@ OpTime ReplicationConsistencyMarkersImpl::getAppliedThrough(OperationContext* op
}
LOGV2_DEBUG(21294,
3,
- "returning appliedThrough: {appliedThrough}({appliedThrough2})",
- "appliedThrough"_attr = appliedThrough->toString(),
- "appliedThrough2"_attr = appliedThrough->toBSON());
+ "returning appliedThrough: {appliedThroughString}({appliedThroughBSON})",
+ "appliedThroughString"_attr = appliedThrough->toString(),
+ "appliedThroughBSON"_attr = appliedThrough->toBSON());
return appliedThrough.get();
}
@@ -339,9 +338,9 @@ void ReplicationConsistencyMarkersImpl::ensureFastCountOnOplogTruncateAfterPoint
OperationContext* opCtx) {
LOGV2_DEBUG(21295,
3,
- "Updating cached fast-count on collection {oplogTruncateAfterPointNss} in case an "
+ "Updating cached fast-count on collection {oplogTruncateAfterPointNs} in case an "
"unclean shutdown caused it to become incorrect.",
- "oplogTruncateAfterPointNss"_attr = _oplogTruncateAfterPointNss);
+ "oplogTruncateAfterPointNs"_attr = _oplogTruncateAfterPointNss);
auto result = _storageInterface->findSingleton(opCtx, _oplogTruncateAfterPointNss);
diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
index 4d560f97709..0e5cf9f73cd 100644
--- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp
@@ -879,7 +879,7 @@ void ReplicationCoordinatorExternalStateImpl::_dropAllTempCollections(OperationC
// replica set members.
if (*it == "local")
continue;
- LOGV2_DEBUG(21309, 2, "Removing temporary collections from {it}", "it"_attr = *it);
+ LOGV2_DEBUG(21309, 2, "Removing temporary collections from {db}", "db"_attr = *it);
AutoGetDb autoDb(opCtx, *it, MODE_IX);
invariant(autoDb.getDb(), str::stream() << "Unable to get reference to database " << *it);
autoDb.getDb()->clearTmpCollections(opCtx);
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 921a0f2c091..63af05ccc0b 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -455,8 +455,8 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
StatusWith<LastVote> lastVote = _externalState->loadLocalLastVoteDocument(opCtx);
if (!lastVote.isOK()) {
LOGV2_FATAL(21429,
- "Error loading local voted for document at startup; {lastVote_getStatus}",
- "lastVote_getStatus"_attr = lastVote.getStatus());
+ "Error loading local voted for document at startup; {status}",
+ "status"_attr = lastVote.getStatus());
fassertFailedNoTrace(40367);
}
if (lastVote.getValue().getTerm() == OpTime::kInitialTerm) {
@@ -486,8 +486,8 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
StatusWith<BSONObj> cfg = _externalState->loadLocalConfigDocument(opCtx);
if (!cfg.isOK()) {
LOGV2(21313,
- "Did not find local replica set configuration document at startup; {cfg_getStatus}",
- "cfg_getStatus"_attr = cfg.getStatus());
+ "Did not find local replica set configuration document at startup; {status}",
+ "status"_attr = cfg.getStatus());
return true;
}
ReplSetConfig localConfig;
@@ -508,9 +508,9 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig(OperationContext* opCtx)
"Locally stored replica set configuration does not parse; See "
"http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config "
"for information on how to recover from this. Got \"{status}\" while parsing "
- "{cfg_getValue}",
+ "{config}",
"status"_attr = status,
- "cfg_getValue"_attr = cfg.getValue());
+ "config"_attr = cfg.getValue());
fassertFailedNoTrace(28545);
}
@@ -557,8 +557,8 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
if (!cbData.status.isOK()) {
LOGV2_DEBUG(21314,
1,
- "Loading local replica set configuration failed due to {cbData_status}",
- "cbData_status"_attr = cbData.status);
+ "Loading local replica set configuration failed due to {status}",
+ "status"_attr = cbData.status);
return;
}
@@ -570,17 +570,17 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
LOGV2_WARNING(21405,
"Locally stored replica set configuration does not have a valid entry "
"for the current node; waiting for reconfig or remote heartbeat; Got "
- "\"{myIndex_getStatus}\" while validating {localConfig}",
- "myIndex_getStatus"_attr = myIndex.getStatus(),
+ "\"{status}\" while validating {localConfig}",
+ "status"_attr = myIndex.getStatus(),
"localConfig"_attr = localConfig.toBSON());
myIndex = StatusWith<int>(-1);
} else {
LOGV2_ERROR(21415,
"Locally stored replica set configuration is invalid; See "
"http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config"
- " for information on how to recover from this. Got \"{myIndex_getStatus}\" "
+ " for information on how to recover from this. Got \"{status}\" "
"while validating {localConfig}",
- "myIndex_getStatus"_attr = myIndex.getStatus(),
+ "status"_attr = myIndex.getStatus(),
"localConfig"_attr = localConfig.toBSON());
fassertFailedNoTrace(28544);
}
@@ -589,10 +589,10 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
if (localConfig.getReplSetName() != _settings.ourSetName()) {
LOGV2_WARNING(21406,
"Local replica set configuration document reports set name of "
- "{localConfig_getReplSetName}, but command line reports "
- "{settings_ourSetName}; waiting for reconfig or remote heartbeat",
- "localConfig_getReplSetName"_attr = localConfig.getReplSetName(),
- "settings_ourSetName"_attr = _settings.ourSetName());
+ "{localConfigSetName}, but command line reports "
+ "{settingsSetName}; waiting for reconfig or remote heartbeat",
+ "localConfigSetName"_attr = localConfig.getReplSetName(),
+ "settingsSetName"_attr = _settings.ourSetName());
myIndex = StatusWith<int>(-1);
}
@@ -623,9 +623,8 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig(
LOGV2_WARNING(
21407,
"Failed to load timestamp and/or wall clock time of most recently applied "
- "operation: {lastOpTimeAndWallTimeStatus_getStatus}",
- "lastOpTimeAndWallTimeStatus_getStatus"_attr =
- lastOpTimeAndWallTimeStatus.getStatus());
+ "operation: {status}",
+ "status"_attr = lastOpTimeAndWallTimeStatus.getStatus());
} else {
lastOpTimeAndWallTime = lastOpTimeAndWallTimeStatus.getValue();
}
@@ -747,14 +746,14 @@ void ReplicationCoordinatorImpl::_startDataReplication(OperationContext* opCtx,
stdx::lock_guard<Latch> lock(_mutex);
if (opTimeStatus == ErrorCodes::CallbackCanceled) {
LOGV2(21324,
- "Initial Sync has been cancelled: {opTimeStatus_getStatus}",
- "opTimeStatus_getStatus"_attr = opTimeStatus.getStatus());
+ "Initial Sync has been cancelled: {status}",
+ "status"_attr = opTimeStatus.getStatus());
return;
} else if (!opTimeStatus.isOK()) {
if (_inShutdown) {
LOGV2(21325,
- "Initial Sync failed during shutdown due to {opTimeStatus_getStatus}",
- "opTimeStatus_getStatus"_attr = opTimeStatus.getStatus());
+ "Initial Sync failed during shutdown due to {status}",
+ "status"_attr = opTimeStatus.getStatus());
return;
} else {
LOGV2_ERROR(21416,
@@ -1122,9 +1121,7 @@ void ReplicationCoordinatorImpl::signalDrainComplete(OperationContext* opCtx,
auto status = _topCoord->completeTransitionToPrimary(firstOpTime);
if (status.code() == ErrorCodes::PrimarySteppedDown) {
- LOGV2(21330,
- "Transition to primary failed{causedBy_status}",
- "causedBy_status"_attr = causedBy(status));
+ LOGV2(21330, "Transition to primary failed{status}", "status"_attr = causedBy(status));
return;
}
invariant(status);
@@ -1471,12 +1468,11 @@ Status ReplicationCoordinatorImpl::_waitUntilOpTime(OperationContext* opCtx,
LOGV2_DEBUG(21333,
3,
- "waitUntilOpTime: OpID {opCtx_getOpID} is waiting for OpTime "
- "{targetOpTime} until {deadline_value_or_opCtx_getDeadline}",
- "opCtx_getOpID"_attr = opCtx->getOpID(),
+ "waitUntilOpTime: OpID {OpID} is waiting for OpTime "
+ "{targetOpTime} until {deadline}",
+ "OpID"_attr = opCtx->getOpID(),
"targetOpTime"_attr = targetOpTime,
- "deadline_value_or_opCtx_getDeadline"_attr =
- deadline.value_or(opCtx->getDeadline()));
+ "deadline"_attr = deadline.value_or(opCtx->getDeadline()));
lock.unlock();
auto waitStatus = futureGetNoThrowWithDeadline(
@@ -1522,16 +1518,15 @@ Status ReplicationCoordinatorImpl::_waitUntilMajorityOpTime(mongo::OperationCont
LOGV2_DEBUG(21334,
1,
"waitUntilOpTime: waiting for optime:{targetOpTime} to be in a snapshot -- current "
- "snapshot: {getCurrentCommittedSnapshotOpTime_inlock}",
+ "snapshot: {currentCommittedSnapshotOpTime}",
"targetOpTime"_attr = targetOpTime,
- "getCurrentCommittedSnapshotOpTime_inlock"_attr =
+ "currentCommittedSnapshotOpTime"_attr =
_getCurrentCommittedSnapshotOpTime_inlock());
- LOGV2_DEBUG(
- 21335,
- 3,
- "waitUntilOpTime: waiting for a new snapshot until {deadline_value_or_opCtx_getDeadline}",
- "deadline_value_or_opCtx_getDeadline"_attr = deadline.value_or(opCtx->getDeadline()));
+ LOGV2_DEBUG(21335,
+ 3,
+ "waitUntilOpTime: waiting for a new snapshot until {deadline}",
+ "deadline"_attr = deadline.value_or(opCtx->getDeadline()));
try {
auto ok = opCtx->waitForConditionOrInterruptUntil(
@@ -1724,9 +1719,9 @@ bool ReplicationCoordinatorImpl::_doneWaitingForReplication_inlock(
LOGV2_DEBUG(21337,
1,
"Required snapshot optime: {opTime} is not yet part of the current "
- "'committed' snapshot: {currentCommittedSnapshot_opTime}",
+ "'committed' snapshot: {currentCommittedSnapshotOpTime}",
"opTime"_attr = opTime,
- "currentCommittedSnapshot_opTime"_attr =
+ "currentCommittedSnapshotOpTime"_attr =
_currentCommittedSnapshot->opTime);
return false;
}
@@ -1809,11 +1804,11 @@ ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::awaitRepli
stdx::lock_guard lock(_mutex);
LOGV2(21339,
"Replication failed for write concern: {writeConcern}, waiting for optime: {opTime}, "
- "opID: {opCtx_getOpID}, progress: {getReplicationProgress_lock}",
+ "opID: {opID}, progress: {progress}",
"writeConcern"_attr = writeConcern.toBSON(),
"opTime"_attr = opTime,
- "opCtx_getOpID"_attr = opCtx->getOpID(),
- "getReplicationProgress_lock"_attr = _getReplicationProgress(lock));
+ "opID"_attr = opCtx->getOpID(),
+ "progress"_attr = _getReplicationProgress(lock));
}
return {std::move(status), duration_cast<Milliseconds>(timer.elapsed())};
}
@@ -1964,7 +1959,7 @@ void ReplicationCoordinatorImpl::updateAndLogStateTransitionMetrics(
bob.appendNumber("userOpsKilled", userOpsKilled.get());
bob.appendNumber("userOpsRunning", userOpsRunning.get());
- LOGV2(21340, "State transition ops metrics: {bob_obj}", "bob_obj"_attr = bob.obj());
+ LOGV2(21340, "State transition ops metrics: {metrics}", "metrics"_attr = bob.obj());
}
std::shared_ptr<IsMasterResponse> ReplicationCoordinatorImpl::_makeIsMasterResponse(
@@ -2099,8 +2094,8 @@ std::shared_ptr<const IsMasterResponse> ReplicationCoordinatorImpl::awaitIsMaste
LOGV2_DEBUG(21342,
1,
"Waiting for an isMaster response from a topology change or until deadline: "
- "{deadline_get}. Current TopologyVersion counter is {topologyVersionCounter}",
- "deadline_get"_attr = deadline.get(),
+ "{deadline}. Current TopologyVersion counter is {topologyVersionCounter}",
+ "deadline"_attr = deadline.get(),
"topologyVersionCounter"_attr = topologyVersion.getCounter());
auto statusWithIsMaster =
futureGetNoThrowWithDeadline(opCtx, future, deadline.get(), opCtx->getTimeoutError());
@@ -2480,9 +2475,9 @@ void ReplicationCoordinatorImpl::_performElectionHandoff() {
LOGV2_DEBUG(21348,
1,
"replSetStepUp request to {target} succeeded with response -- "
- "{callbackData_response_data}",
+ "{response}",
"target"_attr = target,
- "callbackData_response_data"_attr = callbackData.response.data);
+ "response"_attr = callbackData.response.data);
} else {
LOGV2(21349,
"replSetStepUp request to {target} failed due to {status}",
@@ -2495,9 +2490,9 @@ void ReplicationCoordinatorImpl::_performElectionHandoff() {
if (!callbackHandleStatus.isOK()) {
LOGV2_ERROR(21417,
"Failed to schedule ReplSetStepUp request to {target} for election handoff: "
- "{callbackHandleStatus}",
+ "{status}",
"target"_attr = target,
- "callbackHandleStatus"_attr = callbackHandleStatus);
+ "status"_attr = callbackHandleStatus);
}
}
@@ -2835,9 +2830,9 @@ Status ReplicationCoordinatorImpl::setMaintenanceMode(bool activate) {
LOGV2_OPTIONS(21351,
{logv2::LogTag::kRS},
- "leaving maintenance mode ({curMaintenanceCalls_1} other maintenance mode "
+ "leaving maintenance mode ({curMaintenanceCalls} other maintenance mode "
"tasks ongoing)",
- "curMaintenanceCalls_1"_attr = curMaintenanceCalls - 1);
+ "curMaintenanceCalls"_attr = curMaintenanceCalls - 1);
} else {
LOGV2_WARNING(21411, "Attempted to leave maintenance mode but it is not currently active");
return Status(ErrorCodes::OperationFailed, "already out of maintenance mode");
@@ -2893,8 +2888,8 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* opCt
const ReplSetReconfigArgs& args,
BSONObjBuilder* resultObj) {
LOGV2(21352,
- "replSetReconfig admin command received from client; new config: {args_newConfigObj}",
- "args_newConfigObj"_attr = args.newConfigObj);
+ "replSetReconfig admin command received from client; new config: {newConfig}",
+ "newConfig"_attr = args.newConfigObj);
stdx::unique_lock<Latch> lk(_mutex);
@@ -2919,8 +2914,8 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* opCt
"its configuration");
default:
LOGV2_FATAL(21432,
- "Unexpected _rsConfigState {int_rsConfigState}",
- "int_rsConfigState"_attr = int(_rsConfigState));
+ "Unexpected _rsConfigState {_rsConfigState}",
+ "_rsConfigState"_attr = int(_rsConfigState));
fassertFailed(18914);
}
@@ -2989,7 +2984,7 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* opCt
str::stream errmsg;
errmsg << "Attempting to reconfigure a replica set with name " << newConfig.getReplSetName()
<< ", but command line reports " << _settings.ourSetName() << "; rejecting";
- LOGV2_ERROR(21419, "{std_string_errmsg}", "std_string_errmsg"_attr = std::string(errmsg));
+ LOGV2_ERROR(21419, "{errmsg}", "errmsg"_attr = std::string(errmsg));
return Status(ErrorCodes::InvalidReplicaSetConfig, errmsg);
}
@@ -2997,16 +2992,16 @@ Status ReplicationCoordinatorImpl::processReplSetReconfig(OperationContext* opCt
_externalState.get(), oldConfig, newConfig, opCtx->getServiceContext(), args.force);
if (!myIndex.isOK()) {
LOGV2_ERROR(21420,
- "replSetReconfig got {myIndex_getStatus} while validating {newConfigObj}",
- "myIndex_getStatus"_attr = myIndex.getStatus(),
+ "replSetReconfig got {status} while validating {newConfigObj}",
+ "status"_attr = myIndex.getStatus(),
"newConfigObj"_attr = newConfigObj);
return Status(ErrorCodes::NewReplicaSetConfigurationIncompatible,
myIndex.getStatus().reason());
}
LOGV2(21353,
- "replSetReconfig config object with {newConfig_getNumMembers} members parses ok",
- "newConfig_getNumMembers"_attr = newConfig.getNumMembers());
+ "replSetReconfig config object with {numMembers} members parses ok",
+ "numMembers"_attr = newConfig.getNumMembers());
if (!args.force &&
!serverGlobalParams.featureCompatibility.isVersion(
@@ -3121,8 +3116,8 @@ void ReplicationCoordinatorImpl::_finishReplSetReconfig(OperationContext* opCtx,
LOGV2_DEBUG(21354,
2,
"Waiting for election to complete before finishing reconfig to config with "
- "{newConfig_getConfigVersionAndTerm}",
- "newConfig_getConfigVersionAndTerm"_attr = newConfig.getConfigVersionAndTerm());
+ "{configVersionAndTerm}",
+ "configVersionAndTerm"_attr = newConfig.getConfigVersionAndTerm());
// Wait for the election to complete and the node's Role to be set to follower.
_replExecutor->waitForEvent(electionFinishedEvent);
}
@@ -3231,7 +3226,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
str::stream errmsg;
errmsg << "Attempting to initiate a replica set with name " << newConfig.getReplSetName()
<< ", but command line reports " << _settings.ourSetName() << "; rejecting";
- LOGV2_ERROR(21424, "{std_string_errmsg}", "std_string_errmsg"_attr = std::string(errmsg));
+ LOGV2_ERROR(21424, "{errmsg}", "errmsg"_attr = std::string(errmsg));
return Status(ErrorCodes::InvalidReplicaSetConfig, errmsg);
}
@@ -3239,15 +3234,15 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt
validateConfigForInitiate(_externalState.get(), newConfig, opCtx->getServiceContext());
if (!myIndex.isOK()) {
LOGV2_ERROR(21425,
- "replSet initiate got {myIndex_getStatus} while validating {configObj}",
- "myIndex_getStatus"_attr = myIndex.getStatus(),
+ "replSet initiate got {status} while validating {configObj}",
+ "status"_attr = myIndex.getStatus(),
"configObj"_attr = configObj);
return Status(ErrorCodes::InvalidReplicaSetConfig, myIndex.getStatus().reason());
}
LOGV2(21357,
- "replSetInitiate config object with {newConfig_getNumMembers} members parses ok",
- "newConfig_getNumMembers"_attr = newConfig.getNumMembers());
+ "replSetInitiate config object with {numMembers} members parses ok",
+ "numMembers"_attr = newConfig.getNumMembers());
// In pv1, the TopologyCoordinator has not set the term yet. It will be set to kInitialTerm if
// the initiate succeeds so we pass that here.
@@ -3529,8 +3524,8 @@ void ReplicationCoordinatorImpl::_performPostMemberStateUpdateAction(
break;
default:
LOGV2_FATAL(21433,
- "Unknown post member state update action {static_cast_int_action}",
- "static_cast_int_action"_attr = static_cast<int>(action));
+ "Unknown post member state update action {action}",
+ "action"_attr = static_cast<int>(action));
fassertFailed(26010);
}
}
@@ -3669,10 +3664,9 @@ void ReplicationCoordinatorImpl::CatchupState::signalHeartbeatUpdate_inlock() {
auto opTimesPerMember = _repl->_topCoord->latestKnownOpTimeSinceHeartbeatRestartPerMember();
for (auto&& pair : opTimesPerMember) {
LOGV2(21367,
- "Member ID: {pair_first}, latest known optime: {pair_second_pair_second_unknown}",
- "pair_first"_attr = pair.first,
- "pair_second_pair_second_unknown"_attr =
- (pair.second ? (*pair.second).toString() : "unknown"));
+ "Member ID: {memberId}, latest known optime: {latestKnownOpTime}",
+ "memberId"_attr = pair.first,
+ "latestKnownOpTime"_attr = (pair.second ? (*pair.second).toString() : "unknown"));
}
if (_waiter) {
@@ -3852,9 +3846,8 @@ ReplicationCoordinatorImpl::_setCurrentRSConfig(WithLock lk,
_selfIndex = myIndex;
if (_selfIndex >= 0) {
LOGV2(21393,
- "This node is {rsConfig_getMemberAt_selfIndex_getHostAndPort} in the config",
- "rsConfig_getMemberAt_selfIndex_getHostAndPort"_attr =
- _rsConfig.getMemberAt(_selfIndex).getHostAndPort());
+ "This node is {hostAndPort} in the config",
+ "hostAndPort"_attr = _rsConfig.getMemberAt(_selfIndex).getHostAndPort());
} else {
LOGV2(21394, "This node is not a member of the config");
}
@@ -4094,9 +4087,8 @@ void ReplicationCoordinatorImpl::resetLastOpTimesFromOplog(OperationContext* opC
if (!lastOpTimeAndWallTimeStatus.getStatus().isOK()) {
LOGV2_WARNING(21412,
"Failed to load timestamp and/or wall clock time of most recently applied "
- "operation; {lastOpTimeAndWallTimeStatus_getStatus}",
- "lastOpTimeAndWallTimeStatus_getStatus"_attr =
- lastOpTimeAndWallTimeStatus.getStatus());
+ "operation; {status}",
+ "status"_attr = lastOpTimeAndWallTimeStatus.getStatus());
} else {
lastOpTimeAndWallTime = lastOpTimeAndWallTimeStatus.getValue();
}
@@ -4268,8 +4260,8 @@ void ReplicationCoordinatorImpl::_setStableTimestampForStorage(WithLock lk) {
if (stableOpTime) {
LOGV2_DEBUG(21396,
2,
- "Setting replication's stable optime to {stableOpTime_value}",
- "stableOpTime_value"_attr = stableOpTime.value());
+ "Setting replication's stable optime to {stableOpTime}",
+ "stableOpTime"_attr = stableOpTime.value());
if (!gTestingSnapshotBehaviorInIsolation) {
// Update committed snapshot and wake up any threads waiting on read concern or
@@ -4358,11 +4350,11 @@ void ReplicationCoordinatorImpl::finishRecoveryIfEligible(OperationContext* opCt
if (!status.isOK()) {
LOGV2_WARNING(21413,
"Failed to transition into {MemberState_MemberState_RS_SECONDARY}. Current "
- "state: {getMemberState}{causedBy_status}",
+ "state: {state}{status}",
"MemberState_MemberState_RS_SECONDARY"_attr =
MemberState(MemberState::RS_SECONDARY),
- "getMemberState"_attr = getMemberState(),
- "causedBy_status"_attr = causedBy(status));
+ "state"_attr = getMemberState(),
+ "status"_attr = causedBy(status));
}
}
@@ -4548,10 +4540,10 @@ Status ReplicationCoordinatorImpl::processHeartbeatV1(const ReplSetHeartbeatArgs
// will trigger reconfig, which cancels and reschedules all heartbeats.
if (args.hasSender()) {
LOGV2(21401,
- "Scheduling heartbeat to fetch a newer config with term {args_getConfigTerm} and "
- "version {args_getConfigVersion} from member: {senderHost}",
- "args_getConfigTerm"_attr = args.getConfigTerm(),
- "args_getConfigVersion"_attr = args.getConfigVersion(),
+ "Scheduling heartbeat to fetch a newer config with term {configTerm} and "
+ "version {configVersion} from member: {senderHost}",
+ "configTerm"_attr = args.getConfigTerm(),
+ "configVersion"_attr = args.getConfigVersion(),
"senderHost"_attr = senderHost);
int senderIndex = _rsConfig.findMemberIndexByHostAndPort(senderHost);
_scheduleHeartbeatToTarget_inlock(senderHost, senderIndex, now);
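
The hunks above all make the same mechanical change: the placeholder in the format string and the "_attr" key are renamed together, from an expression-derived name (cfg_getValue, opCtx_getOpID) to a short semantic one (config, opID), while the values themselves are untouched. A minimal sketch of that convention, assuming the usual logv2 includes and log component are already set up in the file; the log ID and the cfg/status variables are purely illustrative:

    // Before: the attribute key mirrors the C++ expression that produced the value.
    LOGV2_ERROR(12345,
                "Configuration does not parse; got \"{status}\" while parsing {cfg_getValue}",
                "status"_attr = status,
                "cfg_getValue"_attr = cfg.getValue());

    // After: the key names the datum, and the {placeholder} in the message matches it.
    LOGV2_ERROR(12345,
                "Configuration does not parse; got \"{status}\" while parsing {config}",
                "status"_attr = status,
                "config"_attr = cfg.getValue());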
diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
index f24168e51dd..e33d48f64e4 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp
@@ -56,8 +56,8 @@ public:
return;
}
LOGV2(21434,
- "Lost {isDryRun_dry_run}election due to internal error",
- "isDryRun_dry_run"_attr = (_isDryRun ? "dry run " : ""));
+ "Lost {isDryRun}election due to internal error",
+ "isDryRun"_attr = (_isDryRun ? "dry run " : ""));
_replCoord->_topCoord->processLoseElection();
_replCoord->_voteRequester.reset(nullptr);
if (_isDryRun && _replCoord->_electionDryRunFinishedEvent.isValid()) {
@@ -114,8 +114,8 @@ void ReplicationCoordinatorImpl::_startElectSelfV1_inlock(StartElectionReasonEnu
default:
LOGV2_FATAL(21452,
"Entered replica set election code while in illegal config state "
- "{int_rsConfigState}",
- "int_rsConfigState"_attr = int(_rsConfigState));
+ "{rsConfigState}",
+ "rsConfigState"_attr = int(_rsConfigState));
fassertFailed(28641);
}
@@ -197,9 +197,9 @@ void ReplicationCoordinatorImpl::_processDryRunResult(long long originalTerm,
if (_topCoord->getTerm() != originalTerm) {
LOGV2(21439,
"not running for primary, we have been superseded already during dry run. original "
- "term: {originalTerm}, current term: {topCoord_getTerm}",
+ "term: {originalTerm}, current term: {term}",
"originalTerm"_attr = originalTerm,
- "topCoord_getTerm"_attr = _topCoord->getTerm());
+ "term"_attr = _topCoord->getTerm());
return;
}
@@ -314,9 +314,9 @@ void ReplicationCoordinatorImpl::_writeLastVoteForMyElection(
if (_topCoord->getTerm() != lastVote.getTerm()) {
LOGV2(21446,
"not running for primary, we have been superseded already while writing our last "
- "vote. election term: {lastVote_getTerm}, current term: {topCoord_getTerm}",
- "lastVote_getTerm"_attr = lastVote.getTerm(),
- "topCoord_getTerm"_attr = _topCoord->getTerm());
+ "vote. election term: {electionTerm}, current term: {term}",
+ "electionTerm"_attr = lastVote.getTerm(),
+ "term"_attr = _topCoord->getTerm());
return;
}
_startVoteRequester_inlock(lastVote.getTerm(), reason);
@@ -356,9 +356,9 @@ void ReplicationCoordinatorImpl::_onVoteRequestComplete(long long newTerm,
if (_topCoord->getTerm() != newTerm) {
LOGV2(21447,
"not becoming primary, we have been superseded already during election. election "
- "term: {newTerm}, current term: {topCoord_getTerm}",
+ "term: {newTerm}, current term: {term}",
"newTerm"_attr = newTerm,
- "topCoord_getTerm"_attr = _topCoord->getTerm());
+ "term"_attr = _topCoord->getTerm());
return;
}
@@ -374,8 +374,8 @@ void ReplicationCoordinatorImpl::_onVoteRequestComplete(long long newTerm,
return;
case VoteRequester::Result::kSuccessfullyElected:
LOGV2(21450,
- "election succeeded, assuming primary role in term {topCoord_getTerm}",
- "topCoord_getTerm"_attr = _topCoord->getTerm());
+ "election succeeded, assuming primary role in term {term}",
+ "term"_attr = _topCoord->getTerm());
ReplicationMetrics::get(getServiceContext())
.incrementNumElectionsSuccessfulForReason(reason);
break;
diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
index 5f50ebcfad3..2700355bc07 100644
--- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp
@@ -359,16 +359,15 @@ void remoteStepdownCallback(const executor::TaskExecutor::RemoteCommandCallbackA
if (status.isOK()) {
LOGV2_DEBUG(21477,
1,
- "stepdown of primary({cbData_request_target}) succeeded with response -- "
- "{cbData_response_data}",
- "cbData_request_target"_attr = cbData.request.target,
- "cbData_response_data"_attr = cbData.response.data);
+ "stepdown of primary({primary}) succeeded with response -- "
+ "{response}",
+ "primary"_attr = cbData.request.target,
+ "response"_attr = cbData.response.data);
} else {
- LOGV2_WARNING(
- 21486,
- "stepdown of primary({cbData_request_target}) failed due to {cbData_response_status}",
- "cbData_request_target"_attr = cbData.request.target,
- "cbData_response_status"_attr = cbData.response.status);
+ LOGV2_WARNING(21486,
+ "stepdown of primary({primary}) failed due to {status}",
+ "primary"_attr = cbData.request.target,
+ "status"_attr = cbData.response.status);
}
}
} // namespace
@@ -506,8 +505,8 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig_inlock(const ReplSet
case kConfigReplicationDisabled:
LOGV2_FATAL(21491,
"Reconfiguration request occurred while _rsConfigState == "
- "{int_rsConfigState}; aborting.",
- "int_rsConfigState"_attr = int(_rsConfigState));
+ "{_rsConfigState}; aborting.",
+ "_rsConfigState"_attr = int(_rsConfigState));
fassertFailed(18807);
}
_setConfigState_inlock(kConfigHBReconfiguring);
@@ -570,8 +569,8 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigStore(
if (!myIndex.getStatus().isOK() && myIndex.getStatus() != ErrorCodes::NodeNotFound) {
LOGV2_WARNING(21487,
"Not persisting new configuration in heartbeat response to disk because "
- "it is invalid: {myIndex_getStatus}",
- "myIndex_getStatus"_attr = myIndex.getStatus());
+ "it is invalid: {status}",
+ "status"_attr = myIndex.getStatus());
} else {
LOGV2_FOR_HEARTBEATS(4615626,
2,
@@ -728,22 +727,20 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish(
case ErrorCodes::NodeNotFound:
LOGV2(21482,
"Cannot find self in new replica set configuration; I must be removed; "
- "{myIndex_getStatus}",
- "myIndex_getStatus"_attr = myIndex.getStatus());
+ "{status}",
+ "status"_attr = myIndex.getStatus());
break;
case ErrorCodes::InvalidReplicaSetConfig:
- LOGV2_ERROR(
- 21489,
- "Several entries in new config represent this node; "
- "Removing self until an acceptable configuration arrives; {myIndex_getStatus}",
- "myIndex_getStatus"_attr = myIndex.getStatus());
+ LOGV2_ERROR(21489,
+ "Several entries in new config represent this node; "
+ "Removing self until an acceptable configuration arrives; {status}",
+ "status"_attr = myIndex.getStatus());
break;
default:
- LOGV2_ERROR(
- 21490,
- "Could not validate configuration received from remote node; "
- "Removing self until an acceptable configuration arrives; {myIndex_getStatus}",
- "myIndex_getStatus"_attr = myIndex.getStatus());
+ LOGV2_ERROR(21490,
+ "Could not validate configuration received from remote node; "
+ "Removing self until an acceptable configuration arrives; {status}",
+ "status"_attr = myIndex.getStatus());
break;
}
myIndex = StatusWith<int>(-1);
diff --git a/src/mongo/db/repl/replication_process.cpp b/src/mongo/db/repl/replication_process.cpp
index beb706bc49d..48a3017cbb8 100644
--- a/src/mongo/db/repl/replication_process.cpp
+++ b/src/mongo/db/repl/replication_process.cpp
@@ -92,14 +92,12 @@ Status ReplicationProcess::refreshRollbackID(OperationContext* opCtx) {
}
if (kUninitializedRollbackId == _rbid) {
- LOGV2(21529,
- "Rollback ID is {rbidResult_getValue}",
- "rbidResult_getValue"_attr = rbidResult.getValue());
+ LOGV2(21529, "Rollback ID is {rbid}", "rbid"_attr = rbidResult.getValue());
} else {
LOGV2(21530,
- "Rollback ID is {rbidResult_getValue} (previously {rbid})",
- "rbidResult_getValue"_attr = rbidResult.getValue(),
- "rbid"_attr = _rbid);
+ "Rollback ID is {rbid} (previously {previousRBID})",
+ "rbid"_attr = rbidResult.getValue(),
+ "previousRBID"_attr = _rbid);
}
_rbid = rbidResult.getValue();
@@ -127,15 +125,13 @@ Status ReplicationProcess::initializeRollbackID(OperationContext* opCtx) {
auto initRbidSW = _storageInterface->initializeRollbackID(opCtx);
if (initRbidSW.isOK()) {
- LOGV2(21531,
- "Initialized the rollback ID to {initRbidSW_getValue}",
- "initRbidSW_getValue"_attr = initRbidSW.getValue());
+ LOGV2(21531, "Initialized the rollback ID to {rbid}", "rbid"_attr = initRbidSW.getValue());
_rbid = initRbidSW.getValue();
invariant(kUninitializedRollbackId != _rbid);
} else {
LOGV2_WARNING(21534,
- "Failed to initialize the rollback ID: {initRbidSW_getStatus_reason}",
- "initRbidSW_getStatus_reason"_attr = initRbidSW.getStatus().reason());
+ "Failed to initialize the rollback ID: {status}",
+ "status"_attr = initRbidSW.getStatus().reason());
}
return initRbidSW.getStatus();
}
@@ -148,15 +144,13 @@ Status ReplicationProcess::incrementRollbackID(OperationContext* opCtx) {
// If the rollback ID was incremented successfully, cache the new value in _rbid to be returned
// the next time getRollbackID() is called.
if (status.isOK()) {
- LOGV2(21532,
- "Incremented the rollback ID to {status_getValue}",
- "status_getValue"_attr = status.getValue());
+ LOGV2(21532, "Incremented the rollback ID to {rbid}", "rbid"_attr = status.getValue());
_rbid = status.getValue();
invariant(kUninitializedRollbackId != _rbid);
} else {
LOGV2_WARNING(21535,
- "Failed to increment the rollback ID: {status_getStatus_reason}",
- "status_getStatus_reason"_attr = status.getStatus().reason());
+ "Failed to increment the rollback ID: {status}",
+ "status"_attr = status.getStatus().reason());
}
return status.getStatus();
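
The replication_process.cpp hunks apply the same convention to StatusWith results: the success path logs the value under a semantic key ({rbid}) and the failure path logs the error under {status}. A sketch of the renamed shape, assuming a StatusWith<int> returned by some rollback-ID helper (the helper and log IDs are illustrative only, not code from this commit):

    StatusWith<int> sw = fetchRollbackId();  // hypothetical helper
    if (sw.isOK()) {
        LOGV2(12346, "Rollback ID is {rbid}", "rbid"_attr = sw.getValue());
    } else {
        LOGV2_WARNING(12347,
                      "Failed to read the rollback ID: {status}",
                      "status"_attr = sw.getStatus().reason());
    }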
diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp
index 224379cd42a..437bf95d9ce 100644
--- a/src/mongo/db/repl/replication_recovery.cpp
+++ b/src/mongo/db/repl/replication_recovery.cpp
@@ -141,8 +141,8 @@ public:
LOGV2_FATAL(
21559,
- "Couldn't find any entries in the oplog{ss_str}, which should be impossible.",
- "ss_str"_attr = ss.str());
+ "Couldn't find any entries in the oplog{oplog}, which should be impossible.",
+ "oplog"_attr = ss.str());
fassertFailedNoTrace(40293);
}
@@ -266,8 +266,8 @@ void ReplicationRecoveryImpl::_assertNoRecoveryNeededOnUnstableCheckpoint(Operat
auto topOfOplogSW = _getTopOfOplog(opCtx);
if (!topOfOplogSW.isOK()) {
LOGV2_FATAL(21565,
- "Recovery not possible, no oplog found: {topOfOplogSW_getStatus}",
- "topOfOplogSW_getStatus"_attr = topOfOplogSW.getStatus());
+ "Recovery not possible, no oplog found: {status}",
+ "status"_attr = topOfOplogSW.getStatus());
fassertFailedNoTrace(31364);
}
const auto topOfOplog = topOfOplogSW.getValue();
@@ -437,8 +437,8 @@ void ReplicationRecoveryImpl::recoverFromOplog(OperationContext* opCtx,
}
} catch (...) {
LOGV2_FATAL(21570,
- "Caught exception during replication recovery: {exceptionToStatus}",
- "exceptionToStatus"_attr = exceptionToStatus());
+ "Caught exception during replication recovery: {exception}",
+ "exception"_attr = exceptionToStatus());
std::terminate();
}
@@ -673,16 +673,16 @@ void ReplicationRecoveryImpl::_truncateOplogTo(OperationContext* opCtx,
// Truncate the oplog AFTER the oplog entry found to be <= truncateAfterTimestamp.
LOGV2(21553,
- "Truncating oplog from {truncateAfterOplogEntry_timestamp} (non-inclusive). Truncate "
+ "Truncating oplog from {timestamp} (non-inclusive). Truncate "
"after point is {truncateAfterTimestamp}",
- "truncateAfterOplogEntry_timestamp"_attr = truncateAfterOplogEntry.getTimestamp(),
+ "timestamp"_attr = truncateAfterOplogEntry.getTimestamp(),
"truncateAfterTimestamp"_attr = truncateAfterTimestamp);
oplogCollection->cappedTruncateAfter(opCtx, truncateAfterRecordId, /*inclusive*/ false);
LOGV2(21554,
- "Replication recovery oplog truncation finished in: {timer_millis}ms",
- "timer_millis"_attr = timer.millis());
+ "Replication recovery oplog truncation finished in: {ms}ms",
+ "ms"_attr = timer.millis());
}
void ReplicationRecoveryImpl::_truncateOplogIfNeededAndThenClearOplogTruncateAfterPoint(
@@ -701,9 +701,9 @@ void ReplicationRecoveryImpl::_truncateOplogIfNeededAndThenClearOplogTruncateAft
LOGV2(21556,
"The oplog truncation point ({truncatePoint}) is equal to or earlier than the stable "
- "timestamp ({stableTimestamp_get}), so truncating after the stable timestamp instead",
+ "timestamp ({stableTimestamp}), so truncating after the stable timestamp instead",
"truncatePoint"_attr = truncatePoint,
- "stableTimestamp_get"_attr = stableTimestamp.get());
+ "stableTimestamp"_attr = stableTimestamp.get());
truncatePoint = stableTimestamp.get();
}
diff --git a/src/mongo/db/repl/reporter.cpp b/src/mongo/db/repl/reporter.cpp
index 8b63bfbaad4..09df89d3827 100644
--- a/src/mongo/db/repl/reporter.cpp
+++ b/src/mongo/db/repl/reporter.cpp
@@ -215,11 +215,10 @@ StatusWith<BSONObj> Reporter::_prepareCommand() {
// If there was an error in preparing the command, abort and return that error.
if (!prepareResult.isOK()) {
- LOGV2_DEBUG(
- 21586,
- 2,
- "Reporter failed to prepare update command with status: {prepareResult_getStatus}",
- "prepareResult_getStatus"_attr = prepareResult.getStatus());
+ LOGV2_DEBUG(21586,
+ 2,
+ "Reporter failed to prepare update command with status: {status}",
+ "status"_attr = prepareResult.getStatus());
_status = prepareResult.getStatus();
return _status;
}
@@ -228,12 +227,11 @@ StatusWith<BSONObj> Reporter::_prepareCommand() {
}
void Reporter::_sendCommand_inlock(BSONObj commandRequest, Milliseconds netTimeout) {
- LOGV2_DEBUG(
- 21587,
- 2,
- "Reporter sending slave oplog progress to upstream updater {target}: {commandRequest}",
- "target"_attr = _target,
- "commandRequest"_attr = commandRequest);
+ LOGV2_DEBUG(21587,
+ 2,
+ "Reporter sending oplog progress to upstream updater {target}: {commandRequest}",
+ "target"_attr = _target,
+ "commandRequest"_attr = commandRequest);
auto scheduleResult = _executor->scheduleRemoteCommand(
executor::RemoteCommandRequest(_target, "admin", commandRequest, nullptr, netTimeout),
diff --git a/src/mongo/db/repl/roll_back_local_operations.cpp b/src/mongo/db/repl/roll_back_local_operations.cpp
index c2724d687bd..8f7e3fd4c46 100644
--- a/src/mongo/db/repl/roll_back_local_operations.cpp
+++ b/src/mongo/db/repl/roll_back_local_operations.cpp
@@ -116,8 +116,8 @@ StatusWith<RollBackLocalOperations::RollbackCommonPoint> RollBackLocalOperations
_scanned++;
LOGV2_DEBUG(21656,
2,
- "Local oplog entry to roll back: {localOplogValue_first}",
- "localOplogValue_first"_attr = redact(_localOplogValue.first));
+ "Local oplog entry to roll back: {localOplog}",
+ "localOplog"_attr = redact(_localOplogValue.first));
auto status = _rollbackOperation(_localOplogValue.first);
if (!status.isOK()) {
invariant(ErrorCodes::NoSuchKey != status.code());
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index 9acc27a0149..9c7c0c92119 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -1019,8 +1019,8 @@ StatusWith<RollBackLocalOperations::RollbackCommonPoint> RollbackImpl::_findComm
// recently upgraded to enableMajorityReadConcern=true.
LOGV2_FATAL(21644,
"Common point must be at least stable timestamp, common point: "
- "{commonPointOpTime_getTimestamp}, stable timestamp: {stableTimestamp}",
- "commonPointOpTime_getTimestamp"_attr = commonPointOpTime.getTimestamp(),
+ "{commonPoint}, stable timestamp: {stableTimestamp}",
+ "commonPoint"_attr = commonPointOpTime.getTimestamp(),
"stableTimestamp"_attr = *stableTimestamp);
fassertFailedNoTrace(51121);
}
@@ -1090,12 +1090,12 @@ boost::optional<BSONObj> RollbackImpl::_findDocumentById(OperationContext* opCtx
return boost::none;
} else {
LOGV2_FATAL(21645,
- "Rollback failed to read document with {id} in namespace {nss_ns} with uuid "
- "{uuid}{causedBy_document_getStatus}",
+ "Rollback failed to read document with {id} in namespace {ns} with uuid "
+ "{uuid}{status}",
"id"_attr = redact(id),
- "nss_ns"_attr = nss.ns(),
+ "ns"_attr = nss.ns(),
"uuid"_attr = uuid.toString(),
- "causedBy_document_getStatus"_attr = causedBy(document.getStatus()));
+ "status"_attr = causedBy(document.getStatus()));
fassert(50751, document.getStatus());
}
@@ -1115,9 +1115,8 @@ Status RollbackImpl::_writeRollbackFiles(OperationContext* opCtx) {
LOGV2(21608,
"The collection with UUID {uuid} is missing in the CollectionCatalog. This could "
"be due to a dropped "
- " collection. Not writing rollback file for uuid {uuid2}",
- "uuid"_attr = uuid,
- "uuid2"_attr = uuid);
+ " collection. Not writing rollback file for uuid",
+ "uuid"_attr = uuid);
continue;
}
@@ -1137,11 +1136,11 @@ void RollbackImpl::_writeRollbackFileForNamespace(OperationContext* opCtx,
const SimpleBSONObjUnorderedSet& idSet) {
RemoveSaver removeSaver(kRollbackRemoveSaverType, uuid.toString(), kRollbackRemoveSaverWhy);
LOGV2(21609,
- "Preparing to write deleted documents to a rollback file for collection {nss_ns} with "
- "uuid {uuid} to {removeSaver_file_generic_string}",
- "nss_ns"_attr = nss.ns(),
+ "Preparing to write deleted documents to a rollback file for collection {ns} with "
+ "uuid {uuid} to {file}",
+ "ns"_attr = nss.ns(),
"uuid"_attr = uuid.toString(),
- "removeSaver_file_generic_string"_attr = removeSaver.file().generic_string());
+ "file"_attr = removeSaver.file().generic_string());
// The RemoveSaver will save the data files in a directory structure similar to the following:
//
@@ -1205,16 +1204,14 @@ void RollbackImpl::_transitionFromRollbackToSecondary(OperationContext* opCtx) {
auto status = _replicationCoordinator->setFollowerMode(MemberState::RS_SECONDARY);
if (!status.isOK()) {
- LOGV2_FATAL(
- 21646,
- "Failed to transition into {MemberState_MemberState_RS_SECONDARY}; expected to be in "
- "state {MemberState_MemberState_RS_ROLLBACK}; found self in "
- "{replicationCoordinator_getMemberState}{causedBy_status}",
- "MemberState_MemberState_RS_SECONDARY"_attr = MemberState(MemberState::RS_SECONDARY),
- "MemberState_MemberState_RS_ROLLBACK"_attr = MemberState(MemberState::RS_ROLLBACK),
- "replicationCoordinator_getMemberState"_attr =
- _replicationCoordinator->getMemberState(),
- "causedBy_status"_attr = causedBy(status));
+ LOGV2_FATAL(21646,
+ "Failed to transition into {targetState}; expected to be in "
+ "state {expectedState}; found self in "
+ "{actualState}{status}",
+ "targetState"_attr = MemberState(MemberState::RS_SECONDARY),
+ "expectedState"_attr = MemberState(MemberState::RS_ROLLBACK),
+ "actualState"_attr = _replicationCoordinator->getMemberState(),
+ "status"_attr = causedBy(status));
fassertFailedNoTrace(40408);
}
}
@@ -1241,35 +1238,30 @@ void RollbackImpl::_resetDropPendingState(OperationContext* opCtx) {
void RollbackImpl::_summarizeRollback(OperationContext* opCtx) const {
LOGV2(21612, "Rollback summary:");
- LOGV2(21613,
- "\tstart time: {rollbackStats_startTime}",
- "rollbackStats_startTime"_attr = _rollbackStats.startTime);
+ LOGV2(21613, "\tstart time: {startTime}", "startTime"_attr = _rollbackStats.startTime);
LOGV2(21614,
- "\tend time: {opCtx_getServiceContext_getFastClockSource_now}",
- "opCtx_getServiceContext_getFastClockSource_now"_attr =
- opCtx->getServiceContext()->getFastClockSource()->now());
+ "\tend time: {endTime}",
+ "endTime"_attr = opCtx->getServiceContext()->getFastClockSource()->now());
LOGV2(21615,
- "\tsync source: {remoteOplog_hostAndPort}",
- "remoteOplog_hostAndPort"_attr = _remoteOplog->hostAndPort().toString());
+ "\tsync source: {syncSource}",
+ "syncSource"_attr = _remoteOplog->hostAndPort().toString());
LOGV2(21616,
"\trollback data file directory: "
- "{rollbackStats_rollbackDataFileDirectory_value_or_none_no_files_written}",
- "rollbackStats_rollbackDataFileDirectory_value_or_none_no_files_written"_attr =
+ "{directory}",
+ "directory"_attr =
_rollbackStats.rollbackDataFileDirectory.value_or("none; no files written"));
if (_rollbackStats.rollbackId) {
- LOGV2(21617,
- "\trollback id: {rollbackStats_rollbackId}",
- "rollbackStats_rollbackId"_attr = *_rollbackStats.rollbackId);
+ LOGV2(21617, "\trollback id: {rbid}", "rbid"_attr = *_rollbackStats.rollbackId);
}
if (_rollbackStats.lastLocalOptime) {
LOGV2(21618,
- "\tlast optime on branch of history rolled back: {rollbackStats_lastLocalOptime}",
- "rollbackStats_lastLocalOptime"_attr = *_rollbackStats.lastLocalOptime);
+ "\tlast optime on branch of history rolled back: {lastLocalOptime}",
+ "lastLocalOptime"_attr = *_rollbackStats.lastLocalOptime);
}
if (_rollbackStats.commonPoint) {
LOGV2(21619,
- "\tcommon point optime: {rollbackStats_commonPoint}",
- "rollbackStats_commonPoint"_attr = *_rollbackStats.commonPoint);
+ "\tcommon point optime: {commonPoint}",
+ "commonPoint"_attr = *_rollbackStats.commonPoint);
}
if (_rollbackStats.lastLocalWallClockTime &&
_rollbackStats.firstOpWallClockTimeAfterCommonPoint) {
@@ -1291,51 +1283,49 @@ void RollbackImpl::_summarizeRollback(OperationContext* opCtx) const {
}
if (_rollbackStats.truncateTimestamp) {
LOGV2(21623,
- "\ttruncate timestamp: {rollbackStats_truncateTimestamp}",
- "rollbackStats_truncateTimestamp"_attr = *_rollbackStats.truncateTimestamp);
+ "\ttruncate timestamp: {truncateTimestamp}",
+ "truncateTimestamp"_attr = *_rollbackStats.truncateTimestamp);
}
if (_rollbackStats.stableTimestamp) {
LOGV2(21624,
- "\tstable timestamp: {rollbackStats_stableTimestamp}",
- "rollbackStats_stableTimestamp"_attr = *_rollbackStats.stableTimestamp);
+ "\tstable timestamp: {stableTimestamp}",
+ "stableTimestamp"_attr = *_rollbackStats.stableTimestamp);
}
LOGV2(21625,
- "\tshard identity document rolled back: {observerInfo_shardIdentityRolledBack}",
- "observerInfo_shardIdentityRolledBack"_attr = _observerInfo.shardIdentityRolledBack);
+ "\tshard identity document rolled back: {shardIdentityRolledBack}",
+ "shardIdentityRolledBack"_attr = _observerInfo.shardIdentityRolledBack);
LOGV2(21626,
"\tconfig server config version document rolled back: "
- "{observerInfo_configServerConfigVersionRolledBack}",
- "observerInfo_configServerConfigVersionRolledBack"_attr =
+ "{configServerConfigVersionRolledBack}",
+ "configServerConfigVersionRolledBack"_attr =
_observerInfo.configServerConfigVersionRolledBack);
LOGV2(21627,
- "\taffected sessions: {observerInfo_rollbackSessionIds_empty_none}",
- "observerInfo_rollbackSessionIds_empty_none"_attr =
- (_observerInfo.rollbackSessionIds.empty() ? "none" : ""));
+ "\taffected sessions: {rollbackSessionIds}",
+ "rollbackSessionIds"_attr = (_observerInfo.rollbackSessionIds.empty() ? "none" : ""));
for (const auto& sessionId : _observerInfo.rollbackSessionIds) {
LOGV2(21628, "\t\t{sessionId}", "sessionId"_attr = sessionId);
}
LOGV2(21629,
- "\taffected namespaces: {observerInfo_rollbackNamespaces_empty_none}",
- "observerInfo_rollbackNamespaces_empty_none"_attr =
- (_observerInfo.rollbackNamespaces.empty() ? "none" : ""));
+ "\taffected namespaces: {rollbackNamespaces}",
+ "rollbackNamespaces"_attr = (_observerInfo.rollbackNamespaces.empty() ? "none" : ""));
for (const auto& nss : _observerInfo.rollbackNamespaces) {
- LOGV2(21630, "\t\t{nss_ns}", "nss_ns"_attr = nss.ns());
+ LOGV2(21630, "\t\t{ns}", "ns"_attr = nss.ns());
}
LOGV2(21631,
"\tcounts of interesting commands rolled back: "
- "{observerInfo_rollbackCommandCounts_empty_none}",
- "observerInfo_rollbackCommandCounts_empty_none"_attr =
+ "{rollbackCommandCounts}",
+ "rollbackCommandCounts"_attr =
(_observerInfo.rollbackCommandCounts.empty() ? "none" : ""));
for (const auto& entry : _observerInfo.rollbackCommandCounts) {
LOGV2(21632,
- "\t\t{entry_first}: {entry_second}",
- "entry_first"_attr = entry.first,
- "entry_second"_attr = entry.second);
+ "\t\t{command}: {count}",
+ "command"_attr = entry.first,
+ "count"_attr = entry.second);
}
LOGV2(21633,
"\ttotal number of entries rolled back (including no-ops): "
- "{observerInfo_numberOfEntriesObserved}",
- "observerInfo_numberOfEntriesObserved"_attr = _observerInfo.numberOfEntriesObserved);
+ "{numberOfEntriesObserved}",
+ "numberOfEntriesObserved"_attr = _observerInfo.numberOfEntriesObserved);
}
} // namespace repl
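
Note that value-side wrappers are left alone by the rename: user-supplied data still passes through redact() and a Status is still wrapped in causedBy(); only the key and its matching {placeholder} change. A sketch, assuming a document lookup whose _id, namespace, and Status are illustrative stand-ins for the ones in the hunk above:

    LOGV2_FATAL(12348,
                "Rollback failed to read document with {id} in namespace {ns} with uuid {uuid}{status}",
                "id"_attr = redact(id),                           // user data stays redacted
                "ns"_attr = nss.ns(),
                "uuid"_attr = uuid.toString(),
                "status"_attr = causedBy(document.getStatus()));  // rendered as a "caused by" clause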
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 44121aa170d..5ddf035ce51 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -850,9 +850,9 @@ void dropIndex(OperationContext* opCtx,
indexCatalog->findIndexByName(opCtx, indexName, includeUnfinishedIndexes);
if (!indexDescriptor) {
LOGV2_WARNING(21725,
- "Rollback failed to drop index {indexName} in {nss}: index not found.",
+ "Rollback failed to drop index {indexName} in {ns}: index not found.",
"indexName"_attr = indexName,
- "nss"_attr = nss.toString());
+ "ns"_attr = nss.toString());
return;
}
WriteUnitOfWork wunit(opCtx);
@@ -861,18 +861,18 @@ void dropIndex(OperationContext* opCtx,
auto status = indexCatalog->dropIndex(opCtx, indexDescriptor);
if (!status.isOK()) {
LOGV2_FATAL(21738,
- "Rollback failed to drop index {indexName} in {nss}: {status}",
+ "Rollback failed to drop index {indexName} in {ns}: {status}",
"indexName"_attr = indexName,
- "nss"_attr = nss.toString(),
+ "ns"_attr = nss.toString(),
"status"_attr = redact(status));
}
} else {
auto status = indexCatalog->dropUnfinishedIndex(opCtx, indexDescriptor);
if (!status.isOK()) {
LOGV2_FATAL(21739,
- "Rollback failed to drop unfinished index {indexName} in {nss}: {status}",
+ "Rollback failed to drop unfinished index {indexName} in {ns}: {status}",
"indexName"_attr = indexName,
- "nss"_attr = nss.toString(),
+ "ns"_attr = nss.toString(),
"status"_attr = redact(status));
}
}
@@ -916,8 +916,8 @@ void rollbackCreateIndexes(OperationContext* opCtx, UUID uuid, std::set<std::str
const string& indexName = *itIndex;
LOGV2(21672,
- "Dropping index in rollback for collection: {nss}, UUID: {uuid}, index: {indexName}",
- "nss"_attr = *nss,
+ "Dropping index in rollback for collection: {ns}, UUID: {uuid}, index: {indexName}",
+ "ns"_attr = *nss,
"uuid"_attr = uuid,
"indexName"_attr = indexName);
@@ -926,8 +926,8 @@ void rollbackCreateIndexes(OperationContext* opCtx, UUID uuid, std::set<std::str
LOGV2_DEBUG(
21673,
1,
- "Dropped index in rollback for collection: {nss}, UUID: {uuid}, index: {indexName}",
- "nss"_attr = *nss,
+ "Dropped index in rollback for collection: {ns}, UUID: {uuid}, index: {indexName}",
+ "ns"_attr = *nss,
"uuid"_attr = uuid,
"indexName"_attr = indexName);
}
@@ -962,8 +962,8 @@ void rollbackDropIndexes(OperationContext* opCtx,
BSONObj indexSpec = itIndex->second;
LOGV2(21675,
- "Creating index in rollback for collection: {nss}, UUID: {uuid}, index: {indexName}",
- "nss"_attr = *nss,
+ "Creating index in rollback for collection: {ns}, UUID: {uuid}, index: {indexName}",
+ "ns"_attr = *nss,
"uuid"_attr = uuid,
"indexName"_attr = indexName);
@@ -972,8 +972,8 @@ void rollbackDropIndexes(OperationContext* opCtx,
LOGV2_DEBUG(
21676,
1,
- "Created index in rollback for collection: {nss}, UUID: {uuid}, index: {indexName}",
- "nss"_attr = *nss,
+ "Created index in rollback for collection: {ns}, UUID: {uuid}, index: {indexName}",
+ "ns"_attr = *nss,
"uuid"_attr = uuid,
"indexName"_attr = indexName);
}
@@ -989,13 +989,12 @@ void dropCollection(OperationContext* opCtx,
if (RollbackImpl::shouldCreateDataFiles()) {
RemoveSaver removeSaver("rollback", "", collection->uuid().toString());
LOGV2(21677,
- "Rolling back createCollection on {nss}: Preparing to write documents to a rollback "
- "file for a collection {nss2} with uuid {collection_uuid} to "
- "{removeSaver_file_generic_string}",
- "nss"_attr = nss,
- "nss2"_attr = nss,
+ "Rolling back createCollection on {ns}: Preparing to write documents to a rollback "
+ "file for a collection with uuid {collection_uuid} to "
+ "{file}",
+ "ns"_attr = nss,
"collection_uuid"_attr = collection->uuid(),
- "removeSaver_file_generic_string"_attr = removeSaver.file().generic_string());
+ "file"_attr = removeSaver.file().generic_string());
// Performs a collection scan and writes all documents in the collection to disk
// in order to keep an archive of items that were rolled back.
@@ -1007,9 +1006,9 @@ void dropCollection(OperationContext* opCtx,
auto status = removeSaver.goingToDelete(curObj);
if (!status.isOK()) {
LOGV2_FATAL(21740,
- "Rolling back createCollection on {nss} failed to write document to "
+ "Rolling back createCollection on {ns} failed to write document to "
"remove saver file: {status}",
- "nss"_attr = nss,
+ "ns"_attr = nss,
"status"_attr = redact(status));
throw RSFatalException(
"Rolling back createCollection. Failed to write document to remove saver "
@@ -1028,17 +1027,17 @@ void dropCollection(OperationContext* opCtx,
WorkingSetCommon::isValidStatusMemberObject(curObj)) {
Status errorStatus = WorkingSetCommon::getMemberObjectStatus(curObj);
LOGV2_FATAL(21741,
- "Rolling back createCollection on {nss} failed with {errorStatus}. A "
+ "Rolling back createCollection on {ns} failed with {errorStatus}. A "
"full resync is necessary.",
- "nss"_attr = nss,
+ "ns"_attr = nss,
"errorStatus"_attr = redact(errorStatus));
throw RSFatalException(
"Rolling back createCollection failed. A full resync is necessary.");
} else {
LOGV2_FATAL(
21742,
- "Rolling back createCollection on {nss} failed. A full resync is necessary.",
- "nss"_attr = nss);
+ "Rolling back createCollection on {ns} failed. A full resync is necessary.",
+ "ns"_attr = nss);
throw RSFatalException(
"Rolling back createCollection failed. A full resync is necessary.");
}
@@ -1074,10 +1073,10 @@ void renameOutOfTheWay(OperationContext* opCtx, RenameCollectionInfo info, Datab
auto tmpNameResult = db->makeUniqueCollectionNamespace(opCtx, "rollback.tmp%%%%%");
if (!tmpNameResult.isOK()) {
LOGV2_FATAL(21743,
- "Unable to generate temporary namespace to rename collection {info_renameTo} "
- "out of the way. {tmpNameResult_getStatus_reason}",
- "info_renameTo"_attr = info.renameTo,
- "tmpNameResult_getStatus_reason"_attr = tmpNameResult.getStatus().reason());
+ "Unable to generate temporary namespace to rename collection {renameTo} "
+ "out of the way. {status}",
+ "renameTo"_attr = info.renameTo,
+ "status"_attr = tmpNameResult.getStatus().reason());
throw RSFatalException(
"Unable to generate temporary namespace to rename collection out of the way.");
}
@@ -1085,15 +1084,13 @@ void renameOutOfTheWay(OperationContext* opCtx, RenameCollectionInfo info, Datab
LOGV2_DEBUG(21678,
2,
- "Attempted to rename collection from {info_renameFrom} to {info_renameTo} but "
- "{info_renameTo2} exists already. Temporarily renaming collection {info_renameTo3} "
- "with UUID {collection_uuid} out of the way to {tempNss}",
- "info_renameFrom"_attr = info.renameFrom,
- "info_renameTo"_attr = info.renameTo,
- "info_renameTo2"_attr = info.renameTo,
- "info_renameTo3"_attr = info.renameTo,
+ "Attempted to rename collection from {renameFrom} to {renameTo} but "
+ "collection exists already. Temporarily renaming collection "
+ "with UUID {collection_uuid} out of the way to {tempNs}",
+ "renameFrom"_attr = info.renameFrom,
+ "renameTo"_attr = info.renameTo,
"collection_uuid"_attr = collection->uuid(),
- "tempNss"_attr = tempNss);
+ "tempNs"_attr = tempNss);
// Renaming the collection that was clashing with the attempted rename
// operation to a different collection name.
@@ -1102,9 +1099,9 @@ void renameOutOfTheWay(OperationContext* opCtx, RenameCollectionInfo info, Datab
if (!renameStatus.isOK()) {
LOGV2_FATAL(21744,
- "Unable to rename collection {info_renameTo} out of the way to {tempNss}",
- "info_renameTo"_attr = info.renameTo,
- "tempNss"_attr = tempNss);
+ "Unable to rename collection {renameTo} out of the way to {tempNs}",
+ "renameTo"_attr = info.renameTo,
+ "tempNs"_attr = tempNss);
throw RSFatalException("Unable to rename collection out of the way");
}
}
@@ -1117,11 +1114,11 @@ void rollbackRenameCollection(OperationContext* opCtx, UUID uuid, RenameCollecti
auto dbName = info.renameFrom.db();
LOGV2(21679,
- "Attempting to rename collection with UUID: {uuid}, from: {info_renameFrom}, to: "
- "{info_renameTo}",
+ "Attempting to rename collection with UUID: {uuid}, from: {renameFrom}, to: "
+ "{renameTo}",
"uuid"_attr = uuid,
- "info_renameFrom"_attr = info.renameFrom,
- "info_renameTo"_attr = info.renameTo);
+ "renameFrom"_attr = info.renameFrom,
+ "renameTo"_attr = info.renameTo);
Lock::DBLock dbLock(opCtx, dbName, MODE_X);
auto databaseHolder = DatabaseHolder::get(opCtx);
auto db = databaseHolder->openDb(opCtx, dbName);
@@ -1143,9 +1140,9 @@ void rollbackRenameCollection(OperationContext* opCtx, UUID uuid, RenameCollecti
if (!status.isOK()) {
LOGV2_FATAL(21745,
"Rename collection failed to roll back twice. We were unable to rename "
- "collection {info_renameFrom} to {info_renameTo}. {status}",
- "info_renameFrom"_attr = info.renameFrom,
- "info_renameTo"_attr = info.renameTo,
+ "collection {renameFrom} to {renameTo}. {status}",
+ "renameFrom"_attr = info.renameFrom,
+ "renameTo"_attr = info.renameTo,
"status"_attr = status.toString());
throw RSFatalException(
"Rename collection failed to roll back twice. We were unable to rename "
@@ -1158,13 +1155,12 @@ void rollbackRenameCollection(OperationContext* opCtx, UUID uuid, RenameCollecti
throw RSFatalException("Unable to rollback renameCollection command");
}
- LOGV2_DEBUG(
- 21680,
- 1,
- "Renamed collection with UUID: {uuid}, from: {info_renameFrom}, to: {info_renameTo}",
- "uuid"_attr = uuid,
- "info_renameFrom"_attr = info.renameFrom,
- "info_renameTo"_attr = info.renameTo);
+ LOGV2_DEBUG(21680,
+ 1,
+ "Renamed collection with UUID: {uuid}, from: {renameFrom}, to: {renameTo}",
+ "uuid"_attr = uuid,
+ "renameFrom"_attr = info.renameFrom,
+ "renameTo"_attr = info.renameTo);
}
Status _syncRollback(OperationContext* opCtx,
@@ -1180,8 +1176,8 @@ Status _syncRollback(OperationContext* opCtx,
how.localTopOfOplog = replCoord->getMyLastAppliedOpTime();
LOGV2_OPTIONS(21681,
{logv2::LogTag::kRS},
- "Starting rollback. Sync source: {rollbackSource_getSource}",
- "rollbackSource_getSource"_attr = rollbackSource.getSource());
+ "Starting rollback. Sync source: {rollbackSource}",
+ "rollbackSource"_attr = rollbackSource.getSource());
how.rbid = rollbackSource.getRollbackId();
uassert(
40506, "Upstream node rolled back. Need to retry our rollback.", how.rbid == requiredRBID);
@@ -1316,8 +1312,8 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
if (nss) {
LOGV2_DEBUG(21687,
2,
- "Refetching document, collection: {nss}, UUID: {uuid}, {doc_id}",
- "nss"_attr = *nss,
+ "Refetching document, collection: {ns}, UUID: {uuid}, {doc_id}",
+ "ns"_attr = *nss,
"uuid"_attr = uuid,
"doc_id"_attr = redact(doc._id));
} else {
@@ -1376,19 +1372,19 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
LOGV2(21689,
"Rollback couldn't re-fetch from uuid: {uuid} _id: {doc_id} "
- "{numFetched}/{fixUpInfo_docsToRefetch_size}: {ex}",
+ "{numFetched}/{docsToRefetch}: {ex}",
"uuid"_attr = uuid,
"doc_id"_attr = redact(doc._id),
"numFetched"_attr = numFetched,
- "fixUpInfo_docsToRefetch_size"_attr = fixUpInfo.docsToRefetch.size(),
+ "docsToRefetch"_attr = fixUpInfo.docsToRefetch.size(),
"ex"_attr = redact(ex));
throw;
}
}
LOGV2(21690,
- "Finished refetching documents. Total size of documents refetched: {goodVersions_size}",
- "goodVersions_size"_attr = goodVersions.size());
+ "Finished refetching documents. Total size of documents refetched: {size}",
+ "size"_attr = goodVersions.size());
// We must start taking unstable checkpoints before rolling back oplog entries. Otherwise, a
// stable checkpoint could include the fixup write (since it is untimestamped) but not the write
@@ -1463,8 +1459,8 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
LOGV2(21696, "This collection does not exist, UUID: {uuid}", "uuid"_attr = uuid);
} else {
LOGV2(21697,
- "Dropping collection: {nss}, UUID: {uuid}",
- "nss"_attr = *nss,
+ "Dropping collection: {ns}, UUID: {uuid}",
+ "ns"_attr = *nss,
"uuid"_attr = uuid);
AutoGetDb dbLock(opCtx, nss->db(), MODE_X);
@@ -1475,8 +1471,8 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
dropCollection(opCtx, *nss, collection, db);
LOGV2_DEBUG(21698,
1,
- "Dropped collection: {nss}, UUID: {uuid}",
- "nss"_attr = *nss,
+ "Dropped collection: {ns}, UUID: {uuid}",
+ "ns"_attr = *nss,
"uuid"_attr = uuid);
}
}
@@ -1532,8 +1528,8 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
invariant(nss);
LOGV2(21702,
- "Resyncing collection metadata for collection: {nss}, UUID: {uuid}",
- "nss"_attr = *nss,
+ "Resyncing collection metadata for collection: {ns}, UUID: {uuid}",
+ "ns"_attr = *nss,
"uuid"_attr = uuid);
Lock::DBLock dbLock(opCtx, nss->db(), MODE_X);
@@ -1555,9 +1551,9 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
// collection.
LOGV2(21703,
- "{nss_ns} not found on remote host, so we do not roll back collmod "
+ "{ns} not found on remote host, so we do not roll back collmod "
"operation. Instead, we will drop the collection soon.",
- "nss_ns"_attr = nss->ns());
+ "ns"_attr = nss->ns());
continue;
}
@@ -1607,12 +1603,12 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
LOGV2_DEBUG(
21704,
1,
- "Resynced collection metadata for collection: {nss}, UUID: {uuid}, with: {info}, "
- "to: {DurableCatalog_get_opCtx_getCollectionOptions_opCtx_collection_getCatalogId}",
- "nss"_attr = *nss,
+ "Resynced collection metadata for collection: {ns}, UUID: {uuid}, with: {info}, "
+ "to: {catalogId}",
+ "ns"_attr = *nss,
"uuid"_attr = uuid,
"info"_attr = redact(info),
- "DurableCatalog_get_opCtx_getCollectionOptions_opCtx_collection_getCatalogId"_attr =
+ "catalogId"_attr =
redact(DurableCatalog::get(opCtx)
->getCollectionOptions(opCtx, collection->getCatalogId())
.toBSON()));
@@ -1663,11 +1659,11 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
if (RollbackImpl::shouldCreateDataFiles()) {
removeSaver = std::make_unique<RemoveSaver>("rollback", "", uuid.toString());
LOGV2(21709,
- "Preparing to write deleted documents to a rollback file for collection {nss} "
- "with uuid {uuid} to {removeSaver_file_generic_string}",
- "nss"_attr = *nss,
+ "Preparing to write deleted documents to a rollback file for collection {ns} "
+ "with uuid {uuid} to {file}",
+ "ns"_attr = *nss,
"uuid"_attr = uuid.toString(),
- "removeSaver_file_generic_string"_attr = removeSaver->file().generic_string());
+ "file"_attr = removeSaver->file().generic_string());
}
const auto& goodVersionsByDocID = nsAndGoodVersionsByDocID.second;
@@ -1676,10 +1672,10 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
if (now - lastProgressUpdate > progressUpdateGap) {
LOGV2(21710,
"{deletes} delete and {updates} update operations processed out of "
- "{goodVersions_size} total operations.",
+ "{total} total operations.",
"deletes"_attr = deletes,
"updates"_attr = updates,
- "goodVersions_size"_attr = goodVersions.size());
+ "total"_attr = goodVersions.size());
lastProgressUpdate = now;
}
const DocID& doc = idAndDoc.first;
@@ -1705,9 +1701,9 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
auto status = removeSaver->goingToDelete(obj);
if (!status.isOK()) {
LOGV2_FATAL(21747,
- "Rollback cannot write document in namespace {nss_ns} to "
+ "Rollback cannot write document in namespace {ns} to "
"archive file: {status}",
- "nss_ns"_attr = nss->ns(),
+ "ns"_attr = nss->ns(),
"status"_attr = redact(status));
throw RSFatalException(str::stream()
<< "Rollback cannot write document in namespace "
@@ -1715,19 +1711,19 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
}
} else {
LOGV2_ERROR(21730,
- "Rollback cannot find object: {pattern} in namespace {nss_ns}",
+ "Rollback cannot find object: {pattern} in namespace {ns}",
"pattern"_attr = pattern,
- "nss_ns"_attr = nss->ns());
+ "ns"_attr = nss->ns());
}
}
if (idAndDoc.second.isEmpty()) {
LOGV2_DEBUG(21711,
2,
- "Deleting document with: {doc_id}, from collection: {doc_ns}, with "
+ "Deleting document with: {doc_id}, from collection: {ns}, with "
"UUID: {uuid}",
"doc_id"_attr = redact(doc._id),
- "doc_ns"_attr = doc.ns,
+ "ns"_attr = doc.ns,
"uuid"_attr = uuid);
// If the document could not be found on the primary, deletes the document.
// TODO 1.6 : can't delete from a capped collection. Need to handle that
@@ -1748,10 +1744,9 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
const auto findOneStart = clock->now();
RecordId loc = Helpers::findOne(opCtx, collection, pattern, false);
if (clock->now() - findOneStart > Milliseconds(200))
- LOGV2_WARNING(
- 21726,
- "Roll back slow no _id index for {nss_ns} perhaps?",
- "nss_ns"_attr = nss->ns());
+ LOGV2_WARNING(21726,
+ "Roll back slow no _id index for {ns} perhaps?",
+ "ns"_attr = nss->ns());
// Would be faster but requires index:
// RecordId loc = Helpers::findById(nsd, pattern);
if (!loc.isNull()) {
@@ -1786,12 +1781,11 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
LOGV2_WARNING(21727,
"Ignoring failure to roll back change to capped "
- "collection {nss_ns} with _id "
- "{idAndDoc_first_id_includeFieldName_false}: {e}",
- "nss_ns"_attr = nss->ns(),
- "idAndDoc_first_id_includeFieldName_false"_attr =
- redact(idAndDoc.first._id.toString(
- /*includeFieldName*/ false)),
+ "collection {ns} with _id "
+ "{_id}: {e}",
+ "ns"_attr = nss->ns(),
+ "_id"_attr = redact(idAndDoc.first._id.toString(
+ /*includeFieldName*/ false)),
"e"_attr = redact(e));
}
} else {
@@ -1807,11 +1801,11 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
LOGV2_DEBUG(21712,
2,
"Updating document with: {doc_id}, from collection: {doc_ns}, "
- "UUID: {uuid}, to: {idAndDoc_second}",
+ "UUID: {uuid}, to: {doc}",
"doc_id"_attr = redact(doc._id),
"doc_ns"_attr = doc.ns,
"uuid"_attr = uuid,
- "idAndDoc_second"_attr = redact(idAndDoc.second));
+ "doc"_attr = redact(idAndDoc.second));
// TODO faster...
updates++;
@@ -1826,8 +1820,8 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
}
} catch (const DBException& e) {
LOGV2(21713,
- "Exception in rollback ns:{nss_ns} {pattern} {e} ndeletes:{deletes}",
- "nss_ns"_attr = nss->ns(),
+ "Exception in rollback ns:{ns} {pattern} {e} ndeletes:{deletes}",
+ "ns"_attr = nss->ns(),
"pattern"_attr = pattern.toString(),
"e"_attr = redact(e),
"deletes"_attr = deletes);
@@ -1848,8 +1842,8 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
const bool force = true;
LOGV2(21715,
"Forcing the stable timestamp to the common point: "
- "{fixUpInfo_commonPoint_getTimestamp}",
- "fixUpInfo_commonPoint_getTimestamp"_attr = fixUpInfo.commonPoint.getTimestamp());
+ "{commonPoint}",
+ "commonPoint"_attr = fixUpInfo.commonPoint.getTimestamp());
opCtx->getServiceContext()->getStorageEngine()->setStableTimestamp(
fixUpInfo.commonPoint.getTimestamp(), force);
@@ -1865,10 +1859,9 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
.getTimestamp();
LOGV2(21716,
"Setting initialDataTimestamp to the max of local top of oplog and sync source "
- "top of oplog. Local top of oplog: {fixUpInfo_localTopOfOplog_getTimestamp}, sync "
+ "top of oplog. Local top of oplog: {localTopOfOplog}, sync "
"source top of oplog: {syncSourceTopOfOplog}",
- "fixUpInfo_localTopOfOplog_getTimestamp"_attr =
- fixUpInfo.localTopOfOplog.getTimestamp(),
+ "localTopOfOplog"_attr = fixUpInfo.localTopOfOplog.getTimestamp(),
"syncSourceTopOfOplog"_attr = syncSourceTopOfOplog);
opCtx->getServiceContext()->getStorageEngine()->setInitialDataTimestamp(
std::max(fixUpInfo.localTopOfOplog.getTimestamp(), syncSourceTopOfOplog));
@@ -1881,10 +1874,10 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
}
LOGV2(21718,
- "Truncating the oplog at {fixUpInfo_commonPoint} ({fixUpInfo_commonPointOurDiskloc}), "
+ "Truncating the oplog at {commonPoint} ({commonPointOurDiskloc}), "
"non-inclusive",
- "fixUpInfo_commonPoint"_attr = fixUpInfo.commonPoint.toString(),
- "fixUpInfo_commonPointOurDiskloc"_attr = fixUpInfo.commonPointOurDiskloc);
+ "commonPoint"_attr = fixUpInfo.commonPoint.toString(),
+ "commonPointOurDiskloc"_attr = fixUpInfo.commonPointOurDiskloc);
// Cleans up the oplog.
{
@@ -1912,8 +1905,8 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
// point TS would be incorrect (since this is equal to the stable timestamp), and this write
// will be included in the unstable checkpoint regardless of its timestamp.
LOGV2(21719,
- "Setting appliedThrough to the common point: {fixUpInfo_commonPoint}",
- "fixUpInfo_commonPoint"_attr = fixUpInfo.commonPoint);
+ "Setting appliedThrough to the common point: {commonPoint}",
+ "commonPoint"_attr = fixUpInfo.commonPoint);
const bool setTimestamp = false;
replicationProcess->getConsistencyMarkers()->setAppliedThrough(
opCtx, fixUpInfo.commonPoint, setTimestamp);
@@ -1990,8 +1983,8 @@ Status syncRollback(OperationContext* opCtx,
LOGV2_OPTIONS(21722,
{logv2::LogTag::kRS},
"Rollback finished. The final minValid is: "
- "{replicationProcess_getConsistencyMarkers_getMinValid_opCtx}",
- "replicationProcess_getConsistencyMarkers_getMinValid_opCtx"_attr =
+ "{minValid}",
+ "minValid"_attr =
replicationProcess->getConsistencyMarkers()->getMinValid(opCtx));
return status;
@@ -2023,12 +2016,11 @@ void rollback(OperationContext* opCtx,
auto status = replCoord->setFollowerModeStrict(opCtx, MemberState::RS_ROLLBACK);
if (!status.isOK()) {
LOGV2(21723,
- "Cannot transition from {replCoord_getMemberState} to "
- "{MemberState_MemberState_RS_ROLLBACK}{causedBy_status}",
- "replCoord_getMemberState"_attr = replCoord->getMemberState().toString(),
- "MemberState_MemberState_RS_ROLLBACK"_attr =
- MemberState(MemberState::RS_ROLLBACK).toString(),
- "causedBy_status"_attr = causedBy(status));
+ "Cannot transition from {memberState} to "
+ "{targetState}{status}",
+ "memberState"_attr = replCoord->getMemberState().toString(),
+ "targetState"_attr = MemberState(MemberState::RS_ROLLBACK).toString(),
+ "status"_attr = causedBy(status));
return;
}
}
@@ -2068,11 +2060,11 @@ void rollback(OperationContext* opCtx,
LOGV2_WARNING(21728,
"Rollback cannot complete at this time (retrying later): {ex} "
- "appliedThrough= {replCoord_getMyLastAppliedOpTime} minvalid= "
- "{replicationProcess_getConsistencyMarkers_getMinValid_opCtx}",
+ "appliedThrough= {myLastAppliedOpTime} minvalid= "
+ "{minValid}",
"ex"_attr = redact(ex),
- "replCoord_getMyLastAppliedOpTime"_attr = replCoord->getMyLastAppliedOpTime(),
- "replicationProcess_getConsistencyMarkers_getMinValid_opCtx"_attr =
+ "myLastAppliedOpTime"_attr = replCoord->getMyLastAppliedOpTime(),
+ "minValid"_attr =
replicationProcess->getConsistencyMarkers()->getMinValid(opCtx));
// Sleep a bit to allow upstream node to coalesce, if that was the cause of the failure. If
@@ -2104,15 +2096,14 @@ void rollback(OperationContext* opCtx,
auto status = replCoord->setFollowerMode(MemberState::RS_RECOVERING);
if (!status.isOK()) {
- LOGV2_FATAL(
- 21751,
- "Failed to transition into {MemberState_MemberState_RS_RECOVERING}; expected to be in "
- "state {MemberState_MemberState_RS_ROLLBACK}; found self in "
- "{replCoord_getMemberState}{causedBy_status}",
- "MemberState_MemberState_RS_RECOVERING"_attr = MemberState(MemberState::RS_RECOVERING),
- "MemberState_MemberState_RS_ROLLBACK"_attr = MemberState(MemberState::RS_ROLLBACK),
- "replCoord_getMemberState"_attr = replCoord->getMemberState(),
- "causedBy_status"_attr = causedBy(status));
+ LOGV2_FATAL(21751,
+ "Failed to transition into {targetState}; expected to be in "
+ "state {expectedState}; found self in "
+ "{actualState}{status}",
+ "targetState"_attr = MemberState(MemberState::RS_RECOVERING),
+ "expectedState"_attr = MemberState(MemberState::RS_ROLLBACK),
+ "actualState"_attr = replCoord->getMemberState(),
+ "status"_attr = causedBy(status));
fassertFailedNoTrace(40499);
}
}
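The renames in this file follow the convention the rest of the patch applies: a log attribute is named for the value it carries ("ns", "commonPoint", "status") rather than for the C++ expression that produced it ("nss_ns", "fixUpInfo_commonPoint", "causedBy_status"). Below is a minimal sketch of that convention using the LOGV2 macro as in the surrounding code; the log ID, component choice, and helper function are illustrative placeholders, not part of this patch.

    // Sketch only -- log ID 99999 and logDropExample() are illustrative placeholders.
    #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication

    #include "mongo/db/namespace_string.h"
    #include "mongo/logv2/log.h"

    namespace mongo::repl {
    void logDropExample(const NamespaceString& nss, long long numRecords) {
        // Attribute names describe the logged value, not the expression that produced
        // it, so consumers of the structured log see stable, readable field names.
        LOGV2(99999,
              "Dropping collection {ns} containing {numRecords} records",
              "ns"_attr = nss.ns(),
              "numRecords"_attr = numRecords);
    }
    }  // namespace mongo::repl

With descriptive names, the structured-log field stays the same even if the local variable or accessor feeding it is later renamed.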
diff --git a/src/mongo/db/repl/scatter_gather_runner.cpp b/src/mongo/db/repl/scatter_gather_runner.cpp
index cea5ed2ed71..c9836733c38 100644
--- a/src/mongo/db/repl/scatter_gather_runner.cpp
+++ b/src/mongo/db/repl/scatter_gather_runner.cpp
@@ -106,9 +106,9 @@ StatusWith<EventHandle> ScatterGatherRunner::RunnerImpl::start(
std::vector<RemoteCommandRequest> requests = _algorithm->getRequests();
for (size_t i = 0; i < requests.size(); ++i) {
LOGV2(21752,
- "Scheduling remote command request for {logMessage}: {requests_i}",
- "logMessage"_attr = _logMessage,
- "requests_i"_attr = requests[i].toString());
+ "Scheduling remote command request for {message}: {request}",
+ "message"_attr = _logMessage,
+ "request"_attr = requests[i].toString());
const StatusWith<CallbackHandle> cbh =
_executor->scheduleRemoteCommand(requests[i], processResponseCB);
if (cbh.getStatus() == ErrorCodes::ShutdownInProgress) {
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index bbbf34c3cd2..f62634da128 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -192,8 +192,8 @@ StorageInterfaceImpl::createCollectionForBulkLoading(
LOGV2_DEBUG(21753,
2,
- "StorageInterfaceImpl::createCollectionForBulkLoading called for ns: {nss_ns}",
- "nss_ns"_attr = nss.ns());
+ "StorageInterfaceImpl::createCollectionForBulkLoading called for ns: {ns}",
+ "ns"_attr = nss.ns());
class StashClient {
public:
@@ -387,9 +387,7 @@ Status StorageInterfaceImpl::dropReplicatedDatabases(OperationContext* opCtx) {
std::vector<std::string> dbNames =
opCtx->getServiceContext()->getStorageEngine()->listDatabases();
invariant(!dbNames.empty());
- LOGV2(21754,
- "dropReplicatedDatabases - dropping {dbNames_size} databases",
- "dbNames_size"_attr = dbNames.size());
+ LOGV2(21754, "dropReplicatedDatabases - dropping {num} databases", "num"_attr = dbNames.size());
ReplicationCoordinator::get(opCtx)->dropAllSnapshots();
@@ -414,9 +412,7 @@ Status StorageInterfaceImpl::dropReplicatedDatabases(OperationContext* opCtx) {
});
}
invariant(hasLocalDatabase, "local database missing");
- LOGV2(21756,
- "dropReplicatedDatabases - dropped {dbNames_size} databases",
- "dbNames_size"_attr = dbNames.size());
+ LOGV2(21756, "dropReplicatedDatabases - dropped {num} databases", "num"_attr = dbNames.size());
return Status::OK();
}
diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp
index 31270a850c0..1e7e33eb275 100644
--- a/src/mongo/db/repl/sync_source_feedback.cpp
+++ b/src/mongo/db/repl/sync_source_feedback.cpp
@@ -104,11 +104,10 @@ void SyncSourceFeedback::forwardSlaveProgress() {
if (_reporter) {
auto triggerStatus = _reporter->trigger();
if (!triggerStatus.isOK()) {
- LOGV2_WARNING(
- 21764,
- "unable to forward slave progress to {reporter_getTarget}: {triggerStatus}",
- "reporter_getTarget"_attr = _reporter->getTarget(),
- "triggerStatus"_attr = triggerStatus);
+ LOGV2_WARNING(21764,
+ "unable to forward progress to {target}: {triggerStatus}",
+ "target"_attr = _reporter->getTarget(),
+ "triggerStatus"_attr = triggerStatus);
}
}
}
diff --git a/src/mongo/db/repl/sync_source_resolver.cpp b/src/mongo/db/repl/sync_source_resolver.cpp
index baba3e08744..a7e289e3996 100644
--- a/src/mongo/db/repl/sync_source_resolver.cpp
+++ b/src/mongo/db/repl/sync_source_resolver.cpp
@@ -226,8 +226,8 @@ Status SyncSourceResolver::_scheduleFetcher(std::unique_ptr<Fetcher> fetcher) {
} else {
LOGV2_ERROR(21776,
"Error scheduling fetcher to evaluate host as sync source, "
- "host:{fetcher_getSource}, error: {status}",
- "fetcher_getSource"_attr = fetcher->getSource(),
+ "host:{source}, error: {status}",
+ "source"_attr = fetcher->getSource(),
"status"_attr = status);
}
return status;
@@ -239,10 +239,10 @@ OpTime SyncSourceResolver::_parseRemoteEarliestOpTime(const HostAndPort& candida
// Remote oplog is empty.
const auto until = _taskExecutor->now() + kOplogEmptyBlacklistDuration;
LOGV2(21766,
- "Blacklisting {candidate} due to empty oplog for {kOplogEmptyBlacklistDuration} "
+ "Blacklisting {candidate} due to empty oplog for {duration} "
"until: {until}",
"candidate"_attr = candidate,
- "kOplogEmptyBlacklistDuration"_attr = kOplogEmptyBlacklistDuration,
+ "duration"_attr = kOplogEmptyBlacklistDuration,
"until"_attr = until);
_syncSourceSelector->blacklistSyncSource(candidate, until);
return OpTime();
@@ -254,10 +254,9 @@ OpTime SyncSourceResolver::_parseRemoteEarliestOpTime(const HostAndPort& candida
const auto until = _taskExecutor->now() + kFirstOplogEntryEmptyBlacklistDuration;
LOGV2(21767,
"Blacklisting {candidate} due to empty first document for "
- "{kFirstOplogEntryEmptyBlacklistDuration} until: {until}",
+ "{duration} until: {until}",
"candidate"_attr = candidate,
- "kFirstOplogEntryEmptyBlacklistDuration"_attr =
- kFirstOplogEntryEmptyBlacklistDuration,
+ "duration"_attr = kFirstOplogEntryEmptyBlacklistDuration,
"until"_attr = until);
_syncSourceSelector->blacklistSyncSource(candidate, until);
return OpTime();
@@ -268,14 +267,13 @@ OpTime SyncSourceResolver::_parseRemoteEarliestOpTime(const HostAndPort& candida
const auto until = _taskExecutor->now() + kFirstOplogEntryNullTimestampBlacklistDuration;
LOGV2(21768,
"Blacklisting {candidate} due to error parsing OpTime from the oldest oplog entry "
- "for {kFirstOplogEntryNullTimestampBlacklistDuration} until: {until}. Error: "
- "{remoteEarliestOpTime_getStatus}, Entry: {firstObjFound}",
+ "for {duration} until: {until}. Error: "
+ "{status}, Entry: {entry}",
"candidate"_attr = candidate,
- "kFirstOplogEntryNullTimestampBlacklistDuration"_attr =
- kFirstOplogEntryNullTimestampBlacklistDuration,
+ "duration"_attr = kFirstOplogEntryNullTimestampBlacklistDuration,
"until"_attr = until,
- "remoteEarliestOpTime_getStatus"_attr = remoteEarliestOpTime.getStatus(),
- "firstObjFound"_attr = redact(firstObjFound));
+ "status"_attr = remoteEarliestOpTime.getStatus(),
+ "entry"_attr = redact(firstObjFound));
_syncSourceSelector->blacklistSyncSource(candidate, until);
return OpTime();
}
@@ -285,10 +283,9 @@ OpTime SyncSourceResolver::_parseRemoteEarliestOpTime(const HostAndPort& candida
const auto until = _taskExecutor->now() + kFirstOplogEntryNullTimestampBlacklistDuration;
LOGV2(21769,
"Blacklisting {candidate} due to null timestamp in first document for "
- "{kFirstOplogEntryNullTimestampBlacklistDuration} until: {until}",
+ "{duration} until: {until}",
"candidate"_attr = candidate,
- "kFirstOplogEntryNullTimestampBlacklistDuration"_attr =
- kFirstOplogEntryNullTimestampBlacklistDuration,
+ "duration"_attr = kFirstOplogEntryNullTimestampBlacklistDuration,
"until"_attr = until);
_syncSourceSelector->blacklistSyncSource(candidate, until);
return OpTime();
@@ -319,11 +316,11 @@ void SyncSourceResolver::_firstOplogEntryFetcherCallback(
// We got an error.
const auto until = _taskExecutor->now() + kFetcherErrorBlacklistDuration;
LOGV2(21770,
- "Blacklisting {candidate} due to error: '{queryResult_getStatus}' for "
- "{kFetcherErrorBlacklistDuration} until: {until}",
+ "Blacklisting {candidate} due to error: '{status}' for "
+ "{duration} until: {until}",
"candidate"_attr = candidate,
- "queryResult_getStatus"_attr = queryResult.getStatus(),
- "kFetcherErrorBlacklistDuration"_attr = kFetcherErrorBlacklistDuration,
+ "status"_attr = queryResult.getStatus(),
+ "duration"_attr = kFetcherErrorBlacklistDuration,
"until"_attr = until);
_syncSourceSelector->blacklistSyncSource(candidate, until);
@@ -346,12 +343,12 @@ void SyncSourceResolver::_firstOplogEntryFetcherCallback(
LOGV2(21771,
"We are too stale to use {candidate} as a sync source. Blacklisting this sync source "
- "because our last fetched timestamp: {lastOpTimeFetched_getTimestamp} is before "
- "their earliest timestamp: {remoteEarliestOpTime_getTimestamp} for "
+ "because our last fetched timestamp: {lastOpTimeFetchedTimestamp} is before "
+ "their earliest timestamp: {remoteEarliestOpTimeTimestamp} for "
"{blacklistDuration} until: {until}",
"candidate"_attr = candidate,
- "lastOpTimeFetched_getTimestamp"_attr = _lastOpTimeFetched.getTimestamp(),
- "remoteEarliestOpTime_getTimestamp"_attr = remoteEarliestOpTime.getTimestamp(),
+ "lastOpTimeFetchedTimestamp"_attr = _lastOpTimeFetched.getTimestamp(),
+ "remoteEarliestOpTimeTimestamp"_attr = remoteEarliestOpTime.getTimestamp(),
"blacklistDuration"_attr = blacklistDuration,
"until"_attr = until);
@@ -420,11 +417,11 @@ void SyncSourceResolver::_rbidRequestCallback(
} catch (const DBException& ex) {
const auto until = _taskExecutor->now() + kFetcherErrorBlacklistDuration;
LOGV2(21772,
- "Blacklisting {candidate} due to error: '{ex}' for {kFetcherErrorBlacklistDuration} "
+ "Blacklisting {candidate} due to error: '{ex}' for {duration} "
"until: {until}",
"candidate"_attr = candidate,
"ex"_attr = ex,
- "kFetcherErrorBlacklistDuration"_attr = kFetcherErrorBlacklistDuration,
+ "duration"_attr = kFetcherErrorBlacklistDuration,
"until"_attr = until);
_syncSourceSelector->blacklistSyncSource(candidate, until);
_chooseAndProbeNextSyncSource(earliestOpTimeSeen).transitional_ignore();
@@ -496,11 +493,11 @@ void SyncSourceResolver::_requiredOpTimeFetcherCallback(
const auto until = _taskExecutor->now() + kFetcherErrorBlacklistDuration;
LOGV2(21773,
"Blacklisting {candidate} due to required optime fetcher error: "
- "'{queryResult_getStatus}' for {kFetcherErrorBlacklistDuration} until: {until}. "
+ "'{status}' for {duration} until: {until}. "
"required optime: {requiredOpTime}",
"candidate"_attr = candidate,
- "queryResult_getStatus"_attr = queryResult.getStatus(),
- "kFetcherErrorBlacklistDuration"_attr = kFetcherErrorBlacklistDuration,
+ "status"_attr = queryResult.getStatus(),
+ "duration"_attr = kFetcherErrorBlacklistDuration,
"until"_attr = until,
"requiredOpTime"_attr = _requiredOpTime);
_syncSourceSelector->blacklistSyncSource(candidate, until);
@@ -518,12 +515,12 @@ void SyncSourceResolver::_requiredOpTimeFetcherCallback(
"We cannot use {candidate} as a sync source because it does not contain the necessary "
"operations for us to reach a consistent state: {status} last fetched optime: "
"{lastOpTimeFetched}. required optime: {requiredOpTime}. Blacklisting this sync source "
- "for {kNoRequiredOpTimeBlacklistDuration} until: {until}",
+ "for {duration} until: {until}",
"candidate"_attr = candidate.toString(),
"status"_attr = status,
"lastOpTimeFetched"_attr = _lastOpTimeFetched,
"requiredOpTime"_attr = _requiredOpTime,
- "kNoRequiredOpTimeBlacklistDuration"_attr = kNoRequiredOpTimeBlacklistDuration,
+ "duration"_attr = kNoRequiredOpTimeBlacklistDuration,
"until"_attr = until);
_syncSourceSelector->blacklistSyncSource(candidate, until);
@@ -582,8 +579,8 @@ Status SyncSourceResolver::_finishCallback(const SyncSourceResolverResponse& res
_onCompletion(response);
} catch (...) {
LOGV2_WARNING(21775,
- "sync source resolver finish callback threw exception: {exceptionToStatus}",
- "exceptionToStatus"_attr = exceptionToStatus());
+ "sync source resolver finish callback threw exception: {exception}",
+ "exception"_attr = exceptionToStatus());
}
stdx::lock_guard<Latch> lock(_mutex);
diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp
index 87126dbcc64..c614eef3011 100644
--- a/src/mongo/db/repl/task_runner.cpp
+++ b/src/mongo/db/repl/task_runner.cpp
@@ -66,8 +66,8 @@ TaskRunner::NextAction runSingleTask(const TaskRunner::Task& task,
return task(opCtx, status);
} catch (...) {
LOGV2(21777,
- "Unhandled exception in task runner: {exceptionToStatus}",
- "exceptionToStatus"_attr = redact(exceptionToStatus()));
+ "Unhandled exception in task runner: {exception}",
+ "exception"_attr = redact(exceptionToStatus()));
}
return TaskRunner::NextAction::kCancel;
}
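A second convention visible in several hunks above: attribute values that may contain user data (rolled-back documents, _id values, exceptions, statuses) are passed through redact(), while operational metadata (hosts, durations, optimes) is logged directly. A short sketch under that assumption; the log ID and helper are placeholders, and the exact include set needed for redact() may differ in-tree.

    // Sketch only -- log ID 99998 and logProbeFailureExample() are illustrative placeholders.
    #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication

    #include "mongo/base/status.h"
    #include "mongo/logv2/log.h"
    #include "mongo/util/duration.h"
    #include "mongo/util/net/hostandport.h"

    namespace mongo::repl {
    void logProbeFailureExample(const HostAndPort& candidate,
                                const Status& error,
                                Milliseconds duration) {
        LOGV2(99998,
              "Blacklisting {candidate} due to error: '{error}' for {duration}",
              "candidate"_attr = candidate,   // host:port, plain metadata
              "error"_attr = redact(error),   // reason string may carry user data, so redacted
              "duration"_attr = duration);    // plain metadata
    }
    }  // namespace mongo::repl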
diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp
index ca3d1d0dd03..3d2383332f1 100644
--- a/src/mongo/db/repl/topology_coordinator.cpp
+++ b/src/mongo/db/repl/topology_coordinator.cpp
@@ -2297,8 +2297,8 @@ std::string TopologyCoordinator::_getUnelectableReasonString(const UnelectableRe
}
if (!hasWrittenToStream) {
LOGV2_FATAL(21842,
- "Invalid UnelectableReasonMask value 0x{integerToHex_ur}",
- "integerToHex_ur"_attr = integerToHex(ur));
+ "Invalid UnelectableReasonMask value 0x{value}",
+ "value"_attr = integerToHex(ur));
fassertFailed(26011);
}
ss << " (mask 0x" << integerToHex(ur) << ")";
@@ -2686,8 +2686,8 @@ bool TopologyCoordinator::advanceLastCommittedOpTimeAndWallTime(OpTimeAndWallTim
LOGV2_DEBUG(21823,
1,
"Ignoring older committed snapshot from before I became primary, optime: "
- "{committedOpTime_opTime}, firstOpTimeOfMyTerm: {firstOpTimeOfMyTerm}",
- "committedOpTime_opTime"_attr = committedOpTime.opTime,
+ "{committedOpTime}, firstOpTimeOfMyTerm: {firstOpTimeOfMyTerm}",
+ "committedOpTime"_attr = committedOpTime.opTime,
"firstOpTimeOfMyTerm"_attr = _firstOpTimeOfMyTerm);
return false;
}
@@ -2703,9 +2703,9 @@ bool TopologyCoordinator::advanceLastCommittedOpTimeAndWallTime(OpTimeAndWallTim
"Ignoring commit point with different term than my lastApplied, since it "
"may "
"not be on the same oplog branch as mine. optime: {committedOpTime}, my "
- "last applied: {getMyLastAppliedOpTimeAndWallTime}",
+ "last applied: {myLastAppliedOpTimeAndWallTime}",
"committedOpTime"_attr = committedOpTime,
- "getMyLastAppliedOpTimeAndWallTime"_attr =
+ "myLastAppliedOpTimeAndWallTime"_attr =
getMyLastAppliedOpTimeAndWallTime());
return false;
}
@@ -2790,8 +2790,11 @@ TopologyCoordinator::UpdateTermResult TopologyCoordinator::updateTerm(long long
if (_iAmPrimary()) {
return TopologyCoordinator::UpdateTermResult::kTriggerStepDown;
}
- LOGV2_DEBUG(
- 21827, 1, "Updating term from {term} to {term2}", "term"_attr = _term, "term2"_attr = term);
+ LOGV2_DEBUG(21827,
+ 1,
+ "Updating term from {oldTerm} to {newTerm}",
+ "oldTerm"_attr = _term,
+ "newTerm"_attr = term);
_term = term;
return TopologyCoordinator::UpdateTermResult::kUpdatedTerm;
}
@@ -2824,9 +2827,8 @@ bool TopologyCoordinator::shouldChangeSyncSource(
if (_forceSyncSourceIndex != -1) {
LOGV2(21829,
"Choosing new sync source because the user has requested to use "
- "{rsConfig_getMemberAt_forceSyncSourceIndex_getHostAndPort} as a sync source",
- "rsConfig_getMemberAt_forceSyncSourceIndex_getHostAndPort"_attr =
- _rsConfig.getMemberAt(_forceSyncSourceIndex).getHostAndPort());
+ "{source} as a sync source",
+ "source"_attr = _rsConfig.getMemberAt(_forceSyncSourceIndex).getHostAndPort());
return true;
}
@@ -2838,10 +2840,10 @@ bool TopologyCoordinator::shouldChangeSyncSource(
LOGV2(
21830,
"Choosing new sync source because the config version supplied by {currentSource}, "
- "{replMetadata_getConfigVersion}, does not match ours, {rsConfig_getConfigVersion}",
+ "{sourceConfigVersion}, does not match ours, {configVersion}",
"currentSource"_attr = currentSource,
- "replMetadata_getConfigVersion"_attr = replMetadata.getConfigVersion(),
- "rsConfig_getConfigVersion"_attr = _rsConfig.getConfigVersion());
+ "sourceConfigVersion"_attr = replMetadata.getConfigVersion(),
+ "configVersion"_attr = _rsConfig.getConfigVersion());
return true;
}
}
@@ -2901,7 +2903,7 @@ bool TopologyCoordinator::shouldChangeSyncSource(
} else {
logMessage << " (sync source does not know the primary)";
}
- LOGV2(21832, "{logMessage_str}", "logMessage_str"_attr = logMessage.str());
+ LOGV2(21832, "{msg}", "msg"_attr = logMessage.str());
return true;
}
@@ -2925,19 +2927,17 @@ bool TopologyCoordinator::shouldChangeSyncSource(
(candidateConfig.shouldBuildIndexes() || !_selfConfig().shouldBuildIndexes()) &&
it->getState().readable() && !_memberIsBlacklisted(candidateConfig, now) &&
goalSecs < it->getHeartbeatAppliedOpTime().getSecs()) {
- LOGV2(
- 21834,
- "Choosing new sync source because the most recent OpTime of our sync "
- "source, {currentSource}, is {currentSourceOpTime} which is more than "
- "{options_maxSyncSourceLagSecs} behind member {candidateConfig_getHostAndPort} "
- "whose most recent OpTime is {it_getHeartbeatAppliedOpTime}",
- "currentSource"_attr = currentSource,
- "currentSourceOpTime"_attr = currentSourceOpTime.toString(),
- "options_maxSyncSourceLagSecs"_attr = _options.maxSyncSourceLagSecs,
- "candidateConfig_getHostAndPort"_attr =
- candidateConfig.getHostAndPort().toString(),
- "it_getHeartbeatAppliedOpTime"_attr =
- it->getHeartbeatAppliedOpTime().toString());
+ LOGV2(21834,
+ "Choosing new sync source because the most recent OpTime of our sync "
+ "source, {currentSource}, is {currentSourceOpTime} which is more than "
+ "{maxSyncSourceLagSecs} behind member {member} "
+                  "whose most recent OpTime is {memberHeartbeatAppliedOpTime}",
+ "currentSource"_attr = currentSource,
+ "currentSourceOpTime"_attr = currentSourceOpTime.toString(),
+ "maxSyncSourceLagSecs"_attr = _options.maxSyncSourceLagSecs,
+ "member"_attr = candidateConfig.getHostAndPort().toString(),
+                  "memberHeartbeatAppliedOpTime"_attr =
+ it->getHeartbeatAppliedOpTime().toString());
invariant(itIndex != _selfIndex);
return true;
}