author     Mark Benvenuto <mark.benvenuto@mongodb.com>        2020-02-17 22:40:59 -0500
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2020-02-18 04:03:33 +0000
commit     811b695a37c8c1469b4f6475e62c9a6680b3d95f
tree       37eba8845a07d2904bc6ca292a1ea31475a1168a
parent     95c8fc6a4a98f70b07f32565e3ef3e48172efa1e
download   mongo-811b695a37c8c1469b4f6475e62c9a6680b3d95f.tar.gz
SERVER-46026 Fix tests reliant on specific log lines
-rw-r--r--  jstests/core/getlog2.js                                          |  22
-rw-r--r--  jstests/multiVersion/initial_sync_drop_against_last_stable.js    |   9
-rw-r--r--  jstests/noPassthrough/aggregation_log_namespace.js               |  20
-rw-r--r--  jstests/noPassthrough/bind_localhost.js                          |   2
-rw-r--r--  jstests/noPassthrough/data_consistency_checks.js                 |  12
-rw-r--r--  jstests/noPassthrough/index_stepdown_unique.js                   |   2
-rw-r--r--  jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js   |  17
-rw-r--r--  jstests/noPassthrough/validate_hook_resume_fcv_upgrade.js        |  15
-rw-r--r--  jstests/noPassthrough/wt_operation_stats.js                      |   6
-rw-r--r--  jstests/replsets/collection_clone_resume_after_network_error.js  |  18
-rw-r--r--  jstests/replsets/create_drop_database_different_casing.js        |   9
-rw-r--r--  jstests/replsets/drop_databases_two_phase.js                     |  14
-rw-r--r--  jstests/replsets/initial_sync_drop_collection.js                 |  25
-rw-r--r--  jstests/replsets/initial_sync_invalid_index_spec.js              |  14
-rw-r--r--  jstests/replsets/initial_sync_rename_collection.js               |  27
-rw-r--r--  jstests/replsets/initial_sync_test_fixture_test.js               |  13
-rw-r--r--  jstests/replsets/invalid_index_spec.js                           |  19
-rw-r--r--  jstests/replsets/last_error_reported_after_stepdown.js           |  36
-rw-r--r--  jstests/replsets/libs/election_handoff.js                        |  28
-rw-r--r--  jstests/replsets/mr_nonrepl_coll_in_local_db.js                  |   9
-rw-r--r--  jstests/replsets/read_after_optime.js                            |   8
-rw-r--r--  jstests/replsets/rollback_after_enabling_majority_reads.js       |   2
-rw-r--r--  jstests/sharding/allow_partial_results_nshards.js                |  17
-rw-r--r--  jstests/sharding/comment_field.js                                |  28
-rw-r--r--  jstests/sharding/lagged_config_secondary.js                      |   9
-rw-r--r--  jstests/sharding/libs/sharded_transactions_helpers.js            |  13
-rw-r--r--  jstests/sharding/read_after_optime.js                            |   8
-rw-r--r--  jstests/sharding/read_write_concern_defaults_application.js      |  15
-rw-r--r--  jstests/sharding/shard_identity_rollback.js                      |   3
-rw-r--r--  jstests/sharding/time_zone_info_mongos.js                        |   2
-rw-r--r--  src/mongo/db/s/transaction_coordinator.cpp                       |   2
-rw-r--r--  src/mongo/shell/check_log.js                                     |  27
-rw-r--r--  src/mongo/shell/replsettest.js                                   |   4

33 files changed, 365 insertions(+), 90 deletions(-)
diff --git a/jstests/core/getlog2.js b/jstests/core/getlog2.js
index 8568d61d206..ea37bbbb8fa 100644
--- a/jstests/core/getlog2.js
+++ b/jstests/core/getlog2.js
@@ -6,9 +6,8 @@
// # be routed to the primary.
// assumes_read_preference_unchanged,
// does_not_support_stepdowns,
-// # TODO: Handle JSON logs. See SERVER-45140
-// requires_text_logs,
// ]
+load("jstests/libs/logv2_helpers.js");
(function() {
'use strict';
@@ -31,6 +30,16 @@ function contains(arr, func) {
return false;
}
+function stringContains(haystack, needle) {
+ if (isJsonLogNoConn()) {
+ if (needle.indexOf(":"))
+ needle = '"' + needle.replace(':', "\":");
+ needle = needle.replace(/ /g, "");
+ }
+
+ return haystack.indexOf(needle) != -1;
+}
+
// test doesn't work when talking to mongos
if (db.isMaster().msg === "isdbgrid") {
return;
@@ -55,8 +64,8 @@ assert(contains(query.log, function(v) {
print(v);
const opString = db.getMongo().useReadCommands() ? " find " : " query ";
const filterString = db.getMongo().useReadCommands() ? "filter:" : "command:";
- return v.indexOf(opString) != -1 && v.indexOf(filterString) != -1 &&
- v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 &&
+ return stringContains(v, opString) && stringContains(v, filterString) &&
+ stringContains(v, "keysExamined:") && stringContains(v, "docsExamined:") &&
v.indexOf("SENTINEL") != -1;
}));
@@ -76,9 +85,8 @@ assert.gt(update.log.length, 0, "no log lines");
// Ensure that slow update is logged in detail.
assert(contains(update.log, function(v) {
- print(v);
- return v.indexOf(" update ") != -1 && v.indexOf("command") != -1 &&
- v.indexOf("keysExamined:") != -1 && v.indexOf("docsExamined:") != -1 &&
+ return stringContains(v, " update ") != -1 && stringContains(v, "command") &&
+ stringContains(v, "keysExamined:") && stringContains(v, "docsExamined:") &&
v.indexOf("SENTINEL") != -1;
}));
})();
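
Illustrative sketch (not part of the patch): the rewrite stringContains applies when isJsonLogNoConn() reports logv2 output, shown standalone against a hand-written logv2 fragment. The log line is hypothetical.

    // Hypothetical logv2 slow-query fragment; logv2 JSON contains no spaces.
    const line = '"attr":{"keysExamined":1,"docsExamined":1,"nreturned":1}';
    let needle = "keysExamined:";
    if (needle.indexOf(":") != -1)                  // same rewrite stringContains performs
        needle = '"' + needle.replace(':', "\":");  // keysExamined:  ->  "keysExamined":
    needle = needle.replace(/ /g, "");              // strip spaces the text format had
    print(needle);                      // "keysExamined":
    print(line.indexOf(needle) != -1);  // true
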
diff --git a/jstests/multiVersion/initial_sync_drop_against_last_stable.js b/jstests/multiVersion/initial_sync_drop_against_last_stable.js
index a7969a7f0d6..e36a2a8606e 100644
--- a/jstests/multiVersion/initial_sync_drop_against_last_stable.js
+++ b/jstests/multiVersion/initial_sync_drop_against_last_stable.js
@@ -2,6 +2,7 @@
* Test that CollectionCloner completes without error when a collection is dropped during cloning,
* specifically when that sync source is in 4.2.
*/
+load("jstests/libs/logv2_helpers.js");
(function() {
"use strict";
@@ -99,8 +100,12 @@ function finishTest({failPoint, expectedLog, waitForDrop, createNew}) {
assert.commandWorked(secondary.adminCommand({configureFailPoint: failPoint, mode: 'off'}));
if (expectedLog) {
- jsTestLog(eval(expectedLog));
- checkLog.contains(secondary, eval(expectedLog));
+ expectedLog = eval(expectedLog);
+ if (isJsonLog(primaryColl.getMongo())) {
+ expectedLog = expectedLog.replace(/"/g, "\\\"");
+ }
+ jsTestLog(expectedLog);
+ checkLog.contains(secondary, expectedLog);
}
jsTestLog("Waiting for initial sync to complete.");
diff --git a/jstests/noPassthrough/aggregation_log_namespace.js b/jstests/noPassthrough/aggregation_log_namespace.js
index 274bbc3d0d0..ef7fe89d126 100644
--- a/jstests/noPassthrough/aggregation_log_namespace.js
+++ b/jstests/noPassthrough/aggregation_log_namespace.js
@@ -5,6 +5,7 @@
'use strict';
load("jstests/aggregation/extras/merge_helpers.js"); // For withEachKindOfWriteStage.
+load("jstests/libs/logv2_helpers.js");
// Runs the given 'pipeline' and verifies that the namespace is correctly logged in the global
// log for the aggregate command. The 'comment' parameter is used to match a log entry against
@@ -12,11 +13,20 @@ load("jstests/aggregation/extras/merge_helpers.js"); // For withEachKindOfWrite
function verifyLoggedNamespace({pipeline, comment}) {
assert.commandWorked(db.runCommand(
{aggregate: source.getName(), comment: comment, pipeline: pipeline, cursor: {}}));
- checkLog.containsWithCount(
- conn,
- `command ${source.getFullName()} appName: "MongoDB Shell" ` +
- `command: aggregate { aggregate: "${source.getName()}", comment: "${comment}"`,
- 1);
+ if (isJsonLogNoConn()) {
+ checkLog.containsWithCount(
+ conn,
+ `"appName":"MongoDB Shell",` +
+ `"command":{"aggregate":"${source.getName()}","comment":"${comment}"`,
+ 1);
+
+ } else {
+ checkLog.containsWithCount(
+ conn,
+ `command ${source.getFullName()} appName: "MongoDB Shell" ` +
+ `command: aggregate { aggregate: "${source.getName()}", comment: "${comment}"`,
+ 1);
+ }
}
const mongodOptions = {};
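
Illustrative sketch (not part of the patch): the two branches above probe differently shaped log lines with plain substrings, since checkLog.containsWithCount greps the global log. Both probes are sketched below for a hypothetical collection "source" and comment "c1".

    // logv2 form: keys quoted, no spaces, command body nested under "command".
    const jsonProbe = '"appName":"MongoDB Shell","command":{"aggregate":"source","comment":"c1"';
    // text form: spaced, with the full namespace ahead of appName.
    const textProbe = 'command test.source appName: "MongoDB Shell" ' +
        'command: aggregate { aggregate: "source", comment: "c1"';
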
diff --git a/jstests/noPassthrough/bind_localhost.js b/jstests/noPassthrough/bind_localhost.js
index 242b559831d..df4e7155dac 100644
--- a/jstests/noPassthrough/bind_localhost.js
+++ b/jstests/noPassthrough/bind_localhost.js
@@ -7,7 +7,7 @@ const mongo = MongoRunner.runMongod({ipv6: '', bind_ip: 'localhost', useLogFiles
assert.neq(mongo, null, "Database is not running");
const log = cat(mongo.fullOptions.logFile);
print(log);
-assert(log.includes('Listening on 127.0.0.1'), "Not listening on AF_INET");
+assert(log.match(/Listening on.*127.0.0.1/), "Not listening on AF_INET");
if (!_isWindows()) {
assert(log.match(/Listening on .*\.sock/), "Not listening on AF_UNIX");
}
diff --git a/jstests/noPassthrough/data_consistency_checks.js b/jstests/noPassthrough/data_consistency_checks.js
index 94c44f3e49b..6da0ebffb61 100644
--- a/jstests/noPassthrough/data_consistency_checks.js
+++ b/jstests/noPassthrough/data_consistency_checks.js
@@ -4,6 +4,7 @@
*
* @tags: [requires_replication, requires_sharding]
*/
+load("jstests/libs/logv2_helpers.js");
// The global 'db' variable is used by the data consistency hooks.
var db;
@@ -17,12 +18,23 @@ TestData.skipCollectionAndIndexValidation = true;
TestData.skipCheckDBHashes = true;
function makePatternForDBHash(dbName) {
+ if (isJsonLogNoConn()) {
+ return new RegExp(
+ `slow query.*"ns":"${dbName}\\.\\$cmd","appName":"MongoDB Shell","command":{"db[Hh]ash`,
+ "g");
+ }
return new RegExp(
"COMMAND.*command " + dbName + "\\.\\$cmd appName: \"MongoDB Shell\" command: db[Hh]ash",
"g");
}
function makePatternForValidate(dbName, collName) {
+ if (isJsonLogNoConn()) {
+ return new RegExp(
+ `slow query.*"ns":"${
+ dbName}\\.\\$cmd","appName":"MongoDB Shell","command":{"validate":"${collName}"`,
+ "g");
+ }
return new RegExp("COMMAND.*command " + dbName +
"\\.\\$cmd appName: \"MongoDB Shell\" command: validate { validate: \"" +
collName + "\"",
diff --git a/jstests/noPassthrough/index_stepdown_unique.js b/jstests/noPassthrough/index_stepdown_unique.js
index 02be8163efe..7855c9bd711 100644
--- a/jstests/noPassthrough/index_stepdown_unique.js
+++ b/jstests/noPassthrough/index_stepdown_unique.js
@@ -49,7 +49,7 @@ assert.neq(0, exitCode, 'expected shell to exit abnormally due to index build be
// Wait for the IndexBuildCoordinator thread, not the command thread, to report the index build
// as failed.
-checkLog.contains(primary, '[IndexBuildsCoordinatorMongod-0] Index build failed: ');
+checkLog.contains(primary, /\[IndexBuildsCoordinatorMongod-0\].*Index build failed: /);
IndexBuildTest.assertIndexes(coll, 1, ['_id_']);
rst.stopSet();
diff --git a/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js b/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
index 9de22e79de5..5d1e2c4f98d 100644
--- a/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
+++ b/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
@@ -4,6 +4,8 @@
* protocols.
* @tags: [requires_replication, requires_sharding]
*/
+load("jstests/libs/logv2_helpers.js");
+
(function() {
"use strict";
@@ -97,7 +99,7 @@ function runLoggingTests({db, readWriteMode, slowMs, logLevel, sampleRate}) {
// which we do not expect them to appear.
const ignoreFields =
(isMongos
- ? ["docsExamined", "keysExamined", "keysInserted", "keysDeleted", "planSummary",
+ ? ["docsExamined", "keysExamined", "keysInserted", "keysDeleted", "planSummary",
"usedDisk", "hasSortStage"]
: ["nShards"]);
@@ -419,9 +421,16 @@ function findMatchingLogLine(logLines, fields, ignoreFields) {
Object.keys(fields).filter((fieldName) => !ignoreFields.includes(fieldName));
return fieldNames.every((fieldName) => {
const fieldValue = fields[fieldName];
- let regex = escapeRegex(fieldName) + ":? ?(" +
- escapeRegex(checkLog.formatAsLogLine(fieldValue)) + "|" +
- escapeRegex(checkLog.formatAsLogLine(fieldValue, true)) + ")";
+ let regex;
+ if (isJsonLogNoConn()) {
+ regex = "\"" + escapeRegex(fieldName) + "\":? ?(" +
+ escapeRegex(checkLog.formatAsJsonLogLine(fieldValue)) + "|" +
+ escapeRegex(checkLog.formatAsJsonLogLine(fieldValue, true)) + ")";
+ } else {
+ regex = escapeRegex(fieldName) + ":? ?(" +
+ escapeRegex(checkLog.formatAsLogLine(fieldValue)) + "|" +
+ escapeRegex(checkLog.formatAsLogLine(fieldValue, true)) + ")";
+ }
const match = line.match(regex);
return match && match[0];
});
diff --git a/jstests/noPassthrough/validate_hook_resume_fcv_upgrade.js b/jstests/noPassthrough/validate_hook_resume_fcv_upgrade.js
index 0436853f8cc..3da0114f73c 100644
--- a/jstests/noPassthrough/validate_hook_resume_fcv_upgrade.js
+++ b/jstests/noPassthrough/validate_hook_resume_fcv_upgrade.js
@@ -3,6 +3,8 @@
* server regardless of what state any previous upgrades or downgrades have left it in.
*/
+load("jstests/libs/logv2_helpers.js");
+
// The global 'db' variable is used by the data consistency hooks.
var db;
@@ -14,6 +16,13 @@ var db;
TestData.skipCollectionAndIndexValidation = true;
function makePatternForValidate(dbName, collName) {
+ if (isJsonLogNoConn()) {
+ return new RegExp(
+ `slow query.*"ns":"${
+ dbName}\\.\\$cmd","appName":"MongoDB Shell","command":{"validate":"${collName}"`,
+ "g");
+ }
+
return new RegExp("COMMAND.*command " + dbName +
"\\.\\$cmd appName: \"MongoDB Shell\" command: validate { validate: \"" +
collName + "\"",
@@ -21,6 +30,12 @@ function makePatternForValidate(dbName, collName) {
}
function makePatternForSetFCV(targetVersion) {
+ if (isJsonLogNoConn()) {
+ return new RegExp(
+ `slow query.*"appName":"MongoDB Shell","command":{"setFeatureCompatibilityVersion":"${
+ targetVersion}"`,
+ "g");
+ }
return new RegExp(
"COMMAND.*command.*appName: \"MongoDB Shell\" command: setFeatureCompatibilityVersion" +
" { setFeatureCompatibilityVersion: \"" + targetVersion + "\"",
diff --git a/jstests/noPassthrough/wt_operation_stats.js b/jstests/noPassthrough/wt_operation_stats.js
index f3bc4f5aa9d..39f32742ca7 100644
--- a/jstests/noPassthrough/wt_operation_stats.js
+++ b/jstests/noPassthrough/wt_operation_stats.js
@@ -10,8 +10,12 @@
'use strict';
load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
+load("jstests/libs/logv2_helpers.js");
-const readStatRegx = /storage:{ data: { bytesRead: ([0-9]+)/;
+let readStatRegx = /storage:{ data: { bytesRead: ([0-9]+)/;
+if (isJsonLogNoConn()) {
+ readStatRegx = /"storage":{"data":{"bytesRead":([0-9]+)/;
+}
let checkLogStats = function() {
// Check if the log output contains the expected statistics.
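
Illustrative sketch (not part of the patch): the capture group is what the test consumes; a hedged example with a hand-made logv2 fragment.

    const readStatRegx = /"storage":{"data":{"bytesRead":([0-9]+)/;
    const sample = '"storage":{"data":{"bytesRead":262144,"timeReadingMicros":12}}';
    print(sample.match(readStatRegx)[1]);  // 262144
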
diff --git a/jstests/replsets/collection_clone_resume_after_network_error.js b/jstests/replsets/collection_clone_resume_after_network_error.js
index f16b9082f43..d011e18ed2e 100644
--- a/jstests/replsets/collection_clone_resume_after_network_error.js
+++ b/jstests/replsets/collection_clone_resume_after_network_error.js
@@ -13,27 +13,37 @@
load("jstests/replsets/rslib.js"); // For setLogVerbosity()
load("jstests/libs/fail_point_util.js");
+load("jstests/libs/logv2_helpers.js");
// Verify the 'find' command received by the primary includes a resume token request.
function checkHasRequestResumeToken() {
- checkLog.contains(primary, "$_requestResumeToken: true");
+ checkLog.contains(primary, /\$_requestResumeToken"?: ?true/);
}
// Verify the 'find' command received by the primary has no resumeAfter (yet).
function checkNoResumeAfter() {
assert.throws(function() {
- checkLog.contains(primary, "$_resumeAfter", 3 * 1000);
+ checkLog.contains(primary, /\$_resumeAfter/, 3 * 1000);
});
}
// Verify the 'find' command received by the primary has resumeAfter set with the given recordId.
function checkHasResumeAfter(recordId) {
- checkLog.contains(primary, "$_resumeAfter: { $recordId: " + recordId + " }");
+ if (isJsonLogNoConn()) {
+ checkLog.contains(primary, `"$_resumeAfter":{"$recordId":${recordId}}`);
+ } else {
+ checkLog.contains(primary, "$_resumeAfter: { $recordId: " + recordId + " }");
+ }
}
// Verify that we sent a killCursors command on the namespace we are cloning.
function checkHasKillCursors() {
- checkLog.contains(primary, "command test.test command: killCursors { killCursors: \"test\"");
+ if (isJsonLogNoConn()) {
+ checkLog.contains(primary, /"ns":"test.test",.*"command":{"killCursors":"test"/);
+ } else {
+ checkLog.contains(primary,
+ "command test.test command: killCursors { killCursors: \"test\"");
+ }
}
const beforeRetryFailPointName = "hangBeforeRetryingClonerStage";
diff --git a/jstests/replsets/create_drop_database_different_casing.js b/jstests/replsets/create_drop_database_different_casing.js
index 571563a43cf..708acf8832f 100644
--- a/jstests/replsets/create_drop_database_different_casing.js
+++ b/jstests/replsets/create_drop_database_different_casing.js
@@ -13,6 +13,8 @@
*
* @tags: [requires_replication]
*/
+load("jstests/libs/logv2_helpers.js");
+
(function() {
'use strict';
@@ -45,7 +47,12 @@ assert.commandFailedWithCode(lowerDB.createCollection("test"), ErrorCodes.Databa
rst.awaitReplication();
failPoint.off();
-checkLog.contains(primary, "dropDatabase " + dbNameUpper + " - finished");
+if (isJsonLog(primary)) {
+ checkLog.contains(primary,
+ `dropDatabase {dbName} - finished","attr":{"dbName":"${dbNameUpper}"}}`);
+} else {
+ checkLog.contains(primary, "dropDatabase " + dbNameUpper + " - finished");
+}
assert.commandWorked(lowerDB.createCollection("test"));
awaitDropUpper();
diff --git a/jstests/replsets/drop_databases_two_phase.js b/jstests/replsets/drop_databases_two_phase.js
index 691f2146821..28a58bbd1be 100644
--- a/jstests/replsets/drop_databases_two_phase.js
+++ b/jstests/replsets/drop_databases_two_phase.js
@@ -21,6 +21,7 @@
load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
load("jstests/replsets/rslib.js");
+load("jstests/libs/logv2_helpers.js");
// Returns a list of all collections in a given database. Use 'args' as the
// 'listCollections' command arguments.
@@ -151,8 +152,17 @@ jsTestLog('Waiting for dropDatabase command on ' + primary.host + ' to complete.
var exitCode = dropDatabaseProcess();
let db = primary.getDB(dbNameToDrop);
-checkLog.contains(db.getMongo(), "dropping collection: " + dbNameToDrop + "." + collNameToDrop);
-checkLog.contains(db.getMongo(), "dropped 1 collection(s)");
+if (isJsonLog(db.getMongo())) {
+ checkLog.contains(db.getMongo(),
+ `dropping collection: {nss}","attr":{"dbName":"${dbNameToDrop}","nss":"${
+ dbNameToDrop}.${collNameToDrop}"`);
+ checkLog.contains(
+ db.getMongo(),
+ 'dropped {numCollections} collection(s)","attr":{"dbName":"dbToDrop","numCollections":1}');
+} else {
+ checkLog.contains(db.getMongo(), "dropping collection: " + dbNameToDrop + "." + collNameToDrop);
+ checkLog.contains(db.getMongo(), "dropped 1 collection(s)");
+}
assert.eq(0, exitCode, 'dropDatabase command on ' + primary.host + ' failed.');
jsTestLog('Completed dropDatabase command on ' + primary.host);
diff --git a/jstests/replsets/initial_sync_drop_collection.js b/jstests/replsets/initial_sync_drop_collection.js
index de375ffc73c..17916df749b 100644
--- a/jstests/replsets/initial_sync_drop_collection.js
+++ b/jstests/replsets/initial_sync_drop_collection.js
@@ -9,6 +9,7 @@
load("jstests/libs/fail_point_util.js");
load('jstests/replsets/libs/two_phase_drops.js');
load("jstests/libs/uuid_util.js");
+load("jstests/libs/logv2_helpers.js");
// Set up replica set. Disallow chaining so nodes always sync from primary.
const testName = "initial_sync_drop_collection";
@@ -64,7 +65,9 @@ function setupTest({failPoint, extraFailPointData, secondaryStartupParams}) {
function finishTest({failPoint, expectedLog, waitForDrop, createNew}) {
// Get the uuid for use in checking the log line.
- const uuid = extractUUIDFromObject(getUUIDFromListCollections(primaryDB, collName));
+ const uuid_obj = getUUIDFromListCollections(primaryDB, collName);
+ const uuid = extractUUIDFromObject(uuid_obj);
+ const uuid_base64 = uuid_obj.base64();
jsTestLog("Dropping collection on primary: " + primaryColl.getFullName());
assert(primaryColl.drop());
@@ -93,8 +96,9 @@ function finishTest({failPoint, expectedLog, waitForDrop, createNew}) {
assert.commandWorked(secondary.adminCommand({configureFailPoint: failPoint, mode: 'off'}));
if (expectedLog) {
- jsTestLog(eval(expectedLog));
- checkLog.contains(secondary, eval(expectedLog));
+ expectedLog = eval(expectedLog);
+ jsTestLog(expectedLog);
+ checkLog.contains(secondary, expectedLog);
}
jsTestLog("Waiting for initial sync to complete.");
@@ -134,6 +138,11 @@ runDropTest({
let expectedLogFor3and4 =
"`CollectionCloner ns: '${nss}' uuid: UUID(\"${uuid}\") stopped because collection was dropped on source.`";
+if (isJsonLogNoConn()) {
+ expectedLogFor3and4 =
+ '`CollectionCloner ns: \'{getCloner_getSourceNss}\' uuid: UUID("{getCloner_getSourceUuid}") stopped because collection was dropped on source.","attr":{"getCloner_getSourceNss":"${nss}","getCloner_getSourceUuid":{"uuid":{"$binary":{"base64":"${uuid_base64}","subType":"4"}}}}}`';
+}
+
// We don't support 4.2 style two-phase drops with EMRC=false - in that configuration, the
// collection will instead be renamed to a <db>.system.drop.* namespace before being dropped. Since
// the cloner queries collection by UUID, it will observe the first drop phase as a rename.
@@ -169,8 +178,9 @@ runDropTest({
failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
secondaryStartupParams: {collectionClonerBatchSize: 1},
waitForDrop: true,
- expectedLog:
- "`CollectionCloner ns: '${nss}' uuid: UUID(\"${uuid}\") stopped because collection was dropped on source.`"
+ expectedLog: isJsonLogNoConn()
+ ? '`CollectionCloner ns: \'{getCloner_getSourceNss}\' uuid: UUID("{getCloner_getSourceUuid}") stopped because collection was dropped on source.","attr":{"getCloner_getSourceNss":"${nss}","getCloner_getSourceUuid":{"uuid":{"$binary":{"base64":"${uuid_base64}","subType":"4"}}}}}`'
+ : "`CollectionCloner ns: '${nss}' uuid: UUID(\"${uuid}\") stopped because collection was dropped on source.`"
});
jsTestLog(
@@ -179,8 +189,9 @@ runDropTest({
failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
secondaryStartupParams: {collectionClonerBatchSize: 1},
waitForDrop: true,
- expectedLog:
- "`CollectionCloner ns: '${nss}' uuid: UUID(\"${uuid}\") stopped because collection was dropped on source.`",
+ expectedLog: isJsonLogNoConn()
+ ? '`CollectionCloner ns: \'{getCloner_getSourceNss}\' uuid: UUID("{getCloner_getSourceUuid}") stopped because collection was dropped on source.","attr":{"getCloner_getSourceNss":"${nss}","getCloner_getSourceUuid":{"uuid":{"$binary":{"base64":"${uuid_base64}","subType":"4"}}}}}`'
+ : "`CollectionCloner ns: '${nss}' uuid: UUID(\"${uuid}\") stopped because collection was dropped on source.`",
createNew: true
});
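
Illustrative sketch (not part of the patch): the expectedLog values above are template literals wrapped in quotes, so the ${...} placeholders survive until finishTest() eval's the string with nss, uuid, and uuid_base64 in scope. A minimal standalone sketch with hypothetical values:

    const nss = "test.coll";  // hypothetical
    const expectedLog =
        "`CollectionCloner ns: '${nss}' stopped because collection was dropped on source.`";
    // Direct eval sees the local bindings, so interpolation happens here, not at definition:
    print(eval(expectedLog));
    // CollectionCloner ns: 'test.coll' stopped because collection was dropped on source.
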
diff --git a/jstests/replsets/initial_sync_invalid_index_spec.js b/jstests/replsets/initial_sync_invalid_index_spec.js
index 24f8b773118..06b05305838 100644
--- a/jstests/replsets/initial_sync_invalid_index_spec.js
+++ b/jstests/replsets/initial_sync_invalid_index_spec.js
@@ -2,6 +2,7 @@
* Confirm that replica members undergoing initial sync fail if an invalid index specification is
* encountered (where index version is >= 2).
*/
+load("jstests/libs/logv2_helpers.js");
(function() {
"use strict";
@@ -48,9 +49,16 @@ replTest.stop(initSyncNode, undefined, {allowedExitCode: MongoRunner.EXIT_ABRUPT
const msgInvalidOption = "The field 'invalidOption' is not valid for an index specification";
const msgInitialSyncFatalAssertion = "Fatal assertion 40088 InitialSyncFailure";
-assert(rawMongoProgramOutput().match(msgInvalidOption) &&
- rawMongoProgramOutput().match(msgInitialSyncFatalAssertion),
- "Initial sync should have aborted on invalid index specification");
+if (isJsonLogNoConn()) {
+ assert(
+ rawMongoProgramOutput().search(
+ /Fatal assertion.*40088.*InitialSyncFailure.*The field 'invalidOption' is not valid for an index specification/) != -1,
+ "Initial sync should have aborted on invalid index specification");
+} else {
+ assert(rawMongoProgramOutput().match(msgInvalidOption) &&
+ rawMongoProgramOutput().match(msgInitialSyncFatalAssertion),
+ "Initial sync should have aborted on invalid index specification");
+}
replTest.stopSet();
})();
diff --git a/jstests/replsets/initial_sync_rename_collection.js b/jstests/replsets/initial_sync_rename_collection.js
index cc07397cd90..cbe4e4b4053 100644
--- a/jstests/replsets/initial_sync_rename_collection.js
+++ b/jstests/replsets/initial_sync_rename_collection.js
@@ -9,6 +9,7 @@
load("jstests/libs/fail_point_util.js");
load("jstests/libs/uuid_util.js");
load('jstests/replsets/libs/two_phase_drops.js');
+load("jstests/libs/logv2_helpers.js");
// Set up replica set. Disallow chaining so nodes always sync from primary.
const testName = "initial_sync_rename_collection";
@@ -72,7 +73,9 @@ function setupTest({failPoint, extraFailPointData, secondaryStartupParams}) {
function finishTest({failPoint, expectedLog, createNew, renameAcrossDBs}) {
// Get the uuid for use in checking the log line.
- const uuid = extractUUIDFromObject(getUUIDFromListCollections(primaryDB, collName));
+ const uuid_obj = getUUIDFromListCollections(primaryDB, collName);
+ const uuid = extractUUIDFromObject(uuid_obj);
+ const uuid_base64 = uuid_obj.base64();
const target = (renameAcrossDBs ? pCrossDBRenameColl : pRenameColl);
jsTestLog("Renaming collection on primary: " + target.getFullName());
@@ -101,8 +104,9 @@ function finishTest({failPoint, expectedLog, createNew, renameAcrossDBs}) {
assert.commandWorked(secondary.adminCommand({configureFailPoint: failPoint, mode: 'off'}));
if (expectedLog) {
- jsTestLog(eval(expectedLog));
- checkLog.contains(secondary, eval(expectedLog));
+ expectedLog = eval(expectedLog);
+ jsTestLog(expectedLog);
+ checkLog.contains(secondary, expectedLog);
}
jsTestLog("Waiting for initial sync to complete.");
@@ -158,18 +162,28 @@ runRenameTest({
renameAcrossDBs: true
});
+const expectedLogFor5and7 = isJsonLogNoConn()
+ ? '`Initial Sync retrying {getClonerName} stage {stage_getName} due to {lastError}","attr":{"getClonerName":"CollectionCloner","stage_getName":"query","lastError":{"code":175,"codeName":"QueryPlanKilled","errmsg":"collection renamed from \'${nss}\' to \'${rnss}\'. UUID ${uuid}"}}}`'
+ : "`Initial Sync retrying CollectionCloner stage query due to QueryPlanKilled: collection renamed from '${nss}' to '${rnss}'. UUID ${uuid}`";
+
jsTestLog("[5] Testing rename between getMores.");
runRenameTest({
failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
secondaryStartupParams: {collectionClonerBatchSize: 1},
- expectedLog:
- "`Initial Sync retrying CollectionCloner stage query due to QueryPlanKilled: collection renamed from '${nss}' to '${rnss}'. UUID ${uuid}`"
+ expectedLog: expectedLogFor5and7
});
// A cross-DB rename will appear as a drop in the context of the source DB.
let expectedLogFor6and8 =
"`CollectionCloner ns: '${nss}' uuid: UUID(\"${uuid}\") stopped because collection was dropped on source.`";
+if (isJsonLogNoConn()) {
+ expectedLogFor6and8 =
+ '`CollectionCloner ns: \'{getCloner_getSourceNss}\' uuid: UUID("{getCloner_getSourceUuid}") stopped because collection was dropped on source.","attr":{"getCloner_getSourceNss":"${nss}","getCloner_getSourceUuid":{"uuid":{"$binary":{"base64":"${uuid_base64}","subType":"4"}}}}}`';
+}
+
// We don't support 4.2 style two-phase drops with EMRC=false - in that configuration, the
// collection will instead be renamed to a <db>.system.drop.* namespace before being dropped. Since
// the cloner queries collection by UUID, it will observe the first drop phase as a rename.
@@ -191,8 +205,7 @@ jsTestLog("[7] Testing rename with new same-name collection created, between get
runRenameTest({
failPoint: "initialSyncHangCollectionClonerAfterHandlingBatchResponse",
secondaryStartupParams: {collectionClonerBatchSize: 1},
- expectedLog:
- "`Initial Sync retrying CollectionCloner stage query due to QueryPlanKilled: collection renamed from '${nss}' to '${rnss}'. UUID ${uuid}`"
+ expectedLog: expectedLogFor5and7
});
jsTestLog("[8] Testing cross-DB rename with new same-name collection created, between getMores.");
diff --git a/jstests/replsets/initial_sync_test_fixture_test.js b/jstests/replsets/initial_sync_test_fixture_test.js
index c8ae0e98a16..e09c48c97e1 100644
--- a/jstests/replsets/initial_sync_test_fixture_test.js
+++ b/jstests/replsets/initial_sync_test_fixture_test.js
@@ -17,14 +17,17 @@
load("jstests/core/txns/libs/prepare_helpers.js");
load("jstests/replsets/libs/initial_sync_test.js");
+load("jstests/libs/logv2_helpers.js");
/**
* Helper function to check that specific messages appeared or did not appear in the logs.
*/
function checkLogForMsg(node, msg, contains) {
if (contains) {
+ jsTest.log("Check for presence of message (" + node.port + "): |" + msg + "|");
assert(checkLog.checkContainsOnce(node, msg));
} else {
+ jsTest.log("Check for absence of message (" + node.port + "): |" + msg + "|");
assert(!checkLog.checkContainsOnce(node, msg));
}
}
@@ -48,8 +51,18 @@ function checkLogForGetTimestampMsg(node, timestampName, timestamp, contains) {
function checkLogForCollectionClonerMsg(node, commandName, dbname, contains, collUUID) {
let msg =
"Collection Cloner scheduled a remote command on the " + dbname + " db: { " + commandName;
+
+ if (isJsonLog(node)) {
+ msg =
+ 'Collection Cloner scheduled a remote command on the {describeForFuzzer_stage}","attr":{"describeForFuzzer_stage":"' +
+ dbname + " db: { " + commandName;
+ }
+
if (commandName === "listIndexes" && contains) {
msg += ": " + collUUID;
+ if (isJsonLog(node)) {
+ msg = msg.replace('("', '(\\"').replace('")', '\\")');
+ }
}
checkLogForMsg(node, msg, contains);
diff --git a/jstests/replsets/invalid_index_spec.js b/jstests/replsets/invalid_index_spec.js
index 2889ae08da7..35e1491bcde 100644
--- a/jstests/replsets/invalid_index_spec.js
+++ b/jstests/replsets/invalid_index_spec.js
@@ -7,6 +7,7 @@
"use strict";
load("jstests/replsets/rslib.js");
+load("jstests/libs/logv2_helpers.js");
const testName = "invalid_index_spec";
const replTest = new ReplSetTest({nodes: 2});
@@ -53,11 +54,17 @@ const msgIndexBuildsCoordinator = "Fatal assertion 34437";
const msgIndexErrorType = "InvalidIndexSpecificationOption";
const msgIndexError = "The field 'invalidOption2'";
-assert((rawMongoProgramOutput().match(msgIndexBuilder) ||
- rawMongoProgramOutput().match(msgIndexBuildsCoordinator)) &&
- (rawMongoProgramOutput().match(msgIndexErrorType) &&
- rawMongoProgramOutput().match(msgIndexError)),
- "Replication should have aborted on invalid index specification");
-
+if (isJsonLogNoConn()) {
+ assert(
+ rawMongoProgramOutput().search(
+ /Fatal assertion.*(50769|34437).*InvalidIndexSpecificationOption.*The field 'invalidOption2'/) != -1,
+ "Replication should have aborted on invalid index specification");
+} else {
+ assert((rawMongoProgramOutput().match(msgIndexBuilder) ||
+ rawMongoProgramOutput().match(msgIndexBuildsCoordinator)) &&
+ (rawMongoProgramOutput().match(msgIndexErrorType) &&
+ rawMongoProgramOutput().match(msgIndexError)),
+ "Replication should have aborted on invalid index specification");
+}
replTest.stopSet();
})();
diff --git a/jstests/replsets/last_error_reported_after_stepdown.js b/jstests/replsets/last_error_reported_after_stepdown.js
index 0947473aed1..23250803aa5 100644
--- a/jstests/replsets/last_error_reported_after_stepdown.js
+++ b/jstests/replsets/last_error_reported_after_stepdown.js
@@ -2,6 +2,8 @@
* Tests that writes which complete before stepdown correctly report their errors after the
* stepdown.
*/
+load("jstests/libs/logv2_helpers.js");
+
(function() {
"use strict";
@@ -28,6 +30,26 @@ assert.commandWorked(
{writeConcern: {w: 1}}));
rst.awaitReplication();
+function getLogMsgFilter(command, collection) {
+ if (isJsonLogNoConn()) {
+ command = command.replace(/ /g, "");
+ let logMsg = `"type":"${command}",`;
+ if (collection != undefined) {
+ logMsg += `"ns":"${collection}",`;
+ } else {
+ logMsg += '.*';
+ }
+ logMsg += '"appName":"MongoDB Shell",';
+ return RegExp(logMsg);
+ }
+
+ let logMsg = command;
+ if (collection != undefined) {
+ logMsg += collection;
+ }
+ return logMsg + ' appName: "MongoDB Shell"';
+}
+
// Note that "operation" should always be on primaryDataConn, so the stepdown doesn't clear
// the last error.
function runStepDownTest({description, logMsg, operation, errorCode, nDocs}) {
@@ -37,7 +59,7 @@ function runStepDownTest({description, logMsg, operation, errorCode, nDocs}) {
primaryAdmin.adminCommand({setParameter: 1, logComponentVerbosity: {command: 1}}));
operation();
// Wait for the operation to complete.
- checkLog.contains(primary, logMsg + ' appName: "MongoDB Shell"');
+ checkLog.contains(primary, logMsg);
assert.commandWorked(
primaryAdmin.adminCommand({setParameter: 1, logComponentVerbosity: {command: 0}}));
assert.commandWorked(primaryAdmin.adminCommand({replSetStepDown: 60, force: true}));
@@ -66,18 +88,18 @@ function runStepDownTest({description, logMsg, operation, errorCode, nDocs}) {
assert.commandWorked(primaryAdmin.adminCommand({clearLog: 'global'}));
runStepDownTest({
description: "insert",
- logMsg: "insert " + coll.getFullName(),
+ logMsg: getLogMsgFilter("insert ", coll.getFullName()),
operation: () => coll.insert({_id: 0})
});
runStepDownTest({
description: "update",
- logMsg: "update ",
+ logMsg: getLogMsgFilter("update "),
operation: () => coll.update({_id: 'updateme'}, {'$inc': {x: 1}}),
nDocs: 1
});
runStepDownTest({
description: "remove",
- logMsg: "remove ",
+ logMsg: getLogMsgFilter("remove "),
operation: () => coll.remove({_id: 'deleteme'}),
nDocs: 1
});
@@ -87,20 +109,20 @@ runStepDownTest({
assert.commandWorked(primaryAdmin.adminCommand({clearLog: 'global'}));
runStepDownTest({
description: "insert with error",
- logMsg: "insert " + coll.getFullName(),
+ logMsg: getLogMsgFilter("insert ", coll.getFullName()),
operation: () => coll.insert({_id: 0}),
errorCode: ErrorCodes.DuplicateKey
});
runStepDownTest({
description: "update with error",
- logMsg: "update ",
+ logMsg: getLogMsgFilter("update "),
operation: () => coll.update({_id: 'updateme'}, {'$inc': {nullfield: 1}}),
errorCode: ErrorCodes.TypeMismatch,
nDocs: 0
});
runStepDownTest({
description: "remove with error",
- logMsg: "remove ",
+ logMsg: getLogMsgFilter("remove "),
operation: () => coll.remove({'$nonsense': {x: 1}}),
errorCode: ErrorCodes.BadValue,
nDocs: 0
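
Illustrative sketch (not part of the patch): the two filter shapes getLogMsgFilter builds, sketched standalone for a hypothetical insert on test.foo. checkLog.contains accepts either a plain substring or a RegExp, which is why the two branches can return different types.

    const textFilter = 'insert test.foo appName: "MongoDB Shell"';  // substring probe
    const jsonFilter = RegExp('"type":"insert","ns":"test.foo","appName":"MongoDB Shell",');
    print(jsonFilter.test(
        '"attr":{"type":"insert","ns":"test.foo","appName":"MongoDB Shell","command":{}}'));  // true
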
diff --git a/jstests/replsets/libs/election_handoff.js b/jstests/replsets/libs/election_handoff.js
index 895aeda7522..9e6749ac13a 100644
--- a/jstests/replsets/libs/election_handoff.js
+++ b/jstests/replsets/libs/election_handoff.js
@@ -6,6 +6,7 @@
var ElectionHandoffTest = (function() {
load("jstests/replsets/rslib.js");
+ load("jstests/libs/logv2_helpers.js");
const kStepDownPeriodSecs = 30;
const kSIGTERM = 15;
@@ -71,13 +72,26 @@ var ElectionHandoffTest = (function() {
// The checkLog() function blocks until the log line appears.
checkLog.contains(expectedCandidate, "Starting an election due to step up request");
- // If there are only two nodes in the set, verify that the old primary voted "yes".
- if (numNodes === 2) {
- checkLog.contains(expectedCandidate,
- `skipping dry run and running for election in term ${term + 1}`);
- checkLog.contains(
- expectedCandidate,
- `VoteRequester(term ${term + 1}) received a yes vote from ${primary.host}`);
+ if (isJsonLog(expectedCandidate)) {
+ // If there are only two nodes in the set, verify that the old primary voted "yes".
+ if (numNodes === 2) {
+ checkLog.contains(
+ expectedCandidate,
+ `skipping dry run and running for election in term {newTerm}","attr":{"newTerm":${
+ term + 1}}}`);
+ checkLog.contains(
+ expectedCandidate,
+ `VoteRequester(term ${term + 1}) received a yes vote from ${primary.host}`);
+ }
+ } else {
+ // If there are only two nodes in the set, verify that the old primary voted "yes".
+ if (numNodes === 2) {
+ checkLog.contains(expectedCandidate,
+ `skipping dry run and running for election in term ${term + 1}`);
+ checkLog.contains(
+ expectedCandidate,
+ `VoteRequester(term ${term + 1}) received a yes vote from ${primary.host}`);
+ }
}
rst.awaitNodesAgreeOnPrimary();
diff --git a/jstests/replsets/mr_nonrepl_coll_in_local_db.js b/jstests/replsets/mr_nonrepl_coll_in_local_db.js
index 8289d7ee576..a3a395f3c1f 100644
--- a/jstests/replsets/mr_nonrepl_coll_in_local_db.js
+++ b/jstests/replsets/mr_nonrepl_coll_in_local_db.js
@@ -6,6 +6,8 @@
// We verify this requirement by running a map-reduce, examining the logs to find the names of
// all collections created, and checking the oplog for entries logging the creation of each of those
// collections.
+load("jstests/libs/logv2_helpers.js");
+
(function() {
"use strict";
@@ -44,7 +46,12 @@ assert.commandWorked(result);
const logLines = checkLog.getGlobalLog(primaryDB);
let createdCollections = [];
logLines.forEach(function(line) {
- let matchResult = line.match(/createCollection: (.+) with/);
+ let matchResult;
+ if (isJsonLogNoConn()) {
+ matchResult = line.match(/createCollection: .+ with.*"nss":"(.*)"/);
+ } else {
+ matchResult = line.match(/createCollection: (.+) with/);
+ }
if (matchResult) {
createdCollections.push(matchResult[1]);
}
diff --git a/jstests/replsets/read_after_optime.js b/jstests/replsets/read_after_optime.js
index 63b049414f6..5759ac824b0 100644
--- a/jstests/replsets/read_after_optime.js
+++ b/jstests/replsets/read_after_optime.js
@@ -1,4 +1,5 @@
// Test read after opTime functionality with maxTimeMS.
+load("jstests/libs/logv2_helpers.js");
(function() {
"use strict";
@@ -38,6 +39,13 @@ var runTest = function(testDB, primaryConn) {
var msg = 'Command on database ' + testDB.getName() +
' timed out waiting for read concern to be satisfied. Command:';
+
+ if (isJsonLogNoConn()) {
+ msg = new RegExp(
+ `Command on database {request_getDatabase} timed out waiting for read concern to be satisfied. Command:.*"attr":{"request_getDatabase":"${
+ testDB.getName()}",.*`);
+ }
+
checkLog.containsWithCount(testDB.getMongo(), msg, 1);
// Read concern timed out message should not be logged.
diff --git a/jstests/replsets/rollback_after_enabling_majority_reads.js b/jstests/replsets/rollback_after_enabling_majority_reads.js
index 09d3e67d582..a4ba0569afd 100644
--- a/jstests/replsets/rollback_after_enabling_majority_reads.js
+++ b/jstests/replsets/rollback_after_enabling_majority_reads.js
@@ -51,7 +51,7 @@ jsTest.log("Attempt to roll back. This will fassert.");
rollbackTest.transitionToSyncSourceOperationsBeforeRollback();
rollbackTest.transitionToSyncSourceOperationsDuringRollback();
assert.soon(() => {
- return rawMongoProgramOutput().indexOf("Fatal Assertion 51121") !== -1;
+ return rawMongoProgramOutput().search(/Fatal Assertion.+51121/) != -1;
});
jsTest.log(
diff --git a/jstests/sharding/allow_partial_results_nshards.js b/jstests/sharding/allow_partial_results_nshards.js
index 26aa7198500..fe4bf95fbd7 100644
--- a/jstests/sharding/allow_partial_results_nshards.js
+++ b/jstests/sharding/allow_partial_results_nshards.js
@@ -3,6 +3,8 @@
* the number of shards that were actually available during each find or getMore operation.
* @tags: [requires_replication, requires_sharding, requires_fcv_44]
*/
+load("jstests/libs/logv2_helpers.js");
+
(function() {
"use strict";
@@ -52,16 +54,23 @@ function assertMatchingLogLineExists(fields) {
const fieldNames = Object.keys(fields);
return fieldNames.every((fieldName) => {
const fieldValue = fields[fieldName];
- let regex = escapeRegex(fieldName) + ":? ?(" +
- escapeRegex(checkLog.formatAsLogLine(fieldValue)) + "|" +
- escapeRegex(checkLog.formatAsLogLine(fieldValue, true)) + ")";
+ let regex;
+ if (isJsonLogNoConn()) {
+ regex = "\"" + escapeRegex(fieldName) + "\":? ?(" +
+ escapeRegex(checkLog.formatAsJsonLogLine(fieldValue)) + "|" +
+ escapeRegex(checkLog.formatAsJsonLogLine(fieldValue, true)) + ")";
+ } else {
+ regex = escapeRegex(fieldName) + ":? ?(" +
+ escapeRegex(checkLog.formatAsLogLine(fieldValue)) + "|" +
+ escapeRegex(checkLog.formatAsLogLine(fieldValue, true)) + ")";
+ }
const match = line.match(regex);
return match && match[0];
});
}
const globalLog = assert.commandWorked(mongosDB.adminCommand({getLog: "global"}));
- assert(globalLog.log.find((line) => lineMatches(line, fields)));
+ assert(globalLog.log.find((line) => lineMatches(line, fields)), "failed to find matching log line");
}
// Issue a query with {allowPartialResults:true} on the collection. We sort by {_id:1} so that all
diff --git a/jstests/sharding/comment_field.js b/jstests/sharding/comment_field.js
index 293839e6db8..a155c2ed0c0 100644
--- a/jstests/sharding/comment_field.js
+++ b/jstests/sharding/comment_field.js
@@ -3,13 +3,14 @@
* and profiler. This test also verifies that for a sharded collection, the 'comment' fields gets
* passed on from mongos to the respective shards.
*
- * @tags: [requires_fcv_44, requires_text_logs]
+ * @tags: [requires_fcv_44]
*/
(function() {
"use strict";
load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions.
+load("jstests/libs/logv2_helpers.js");
// This test runs manual getMores using different connections, which will not inherit the
// implicit session of the cursor establishing command.
@@ -53,7 +54,7 @@ function verifyLogContains(connections, inputArray, expectedNumOccurrences) {
numOccurrences += ((numMatches == inputArray.length) ? 1 : 0);
}
}
- assert.eq(expectedNumOccurrences, numOccurrences);
+ assert.eq(expectedNumOccurrences, numOccurrences, "Failed to find messages " + inputArray);
}
function setPostCommandFailpointOnShards({mode, options}) {
@@ -162,15 +163,24 @@ function runCommentParamTest({
// Run the 'checkLog' only for commands with uuid so that the we know the log line belongs to
// current operation.
if (commentObj["uuid"]) {
+ // Verify that a field with 'comment' exists in the same line as the command.
+ let expectStrings = [
+ ", comment: ",
+ checkLog.formatAsLogLine(commentObj),
+ 'appName: "MongoDB Shell" command: ' + ((cmdName === "getMore") ? cmdName : "")
+ ];
+ if (isJsonLog(testDB)) {
+ expectStrings = [
+ ',"comment":',
+ checkLog.formatAsJsonLogLine(commentObj),
+ '"appName":"MongoDB Shell","command":{' +
+ ((cmdName === "getMore") ? '"' + cmdName + '"' : "")
+ ];
+ }
+
verifyLogContains(
[testDB, shard0DB, shard1DB],
- // Verify that a field with 'comment' exists in the same line as the command.
- [
- ", comment: ",
- checkLog.formatAsLogLine(commentObj),
- 'appName: "MongoDB Shell" command: ' + ((cmdName === "getMore") ? cmdName : "")
- ],
-
+ expectStrings,
((cmdName === "update" || cmdName === "delete")
? expectedRunningOps
: 0) + // For 'update' and 'delete' commands we also log an additional line
diff --git a/jstests/sharding/lagged_config_secondary.js b/jstests/sharding/lagged_config_secondary.js
index 5cc0504deca..8cfbc06fd8d 100644
--- a/jstests/sharding/lagged_config_secondary.js
+++ b/jstests/sharding/lagged_config_secondary.js
@@ -3,6 +3,8 @@
* are behind the majority opTime.
*/
+load("jstests/libs/logv2_helpers.js");
+
// Checking UUID and index consistency involves mongos being able to do a read from the config
// server, but this test is designed to make mongos time out when reading from the config server.
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
@@ -48,13 +50,18 @@ assert(ErrorCodes.isExceededTimeLimitError(exception.code));
let msgAA = 'command config.$cmd command: find { find: "databases"';
let msgAB = 'errCode:' + ErrorCodes.ClientDisconnect;
let msgB = 'Command on database config timed out waiting for read concern to be satisfied.';
+if (isJsonLogNoConn()) {
+ msgB =
+ /Command on database {request_getDatabase} timed out waiting for read concern to be satisfied.*"request_getDatabase":"config"/;
+}
+
assert.soon(
function() {
var logMessages =
assert.commandWorked(delayedConfigSecondary.adminCommand({getLog: 'global'})).log;
for (var i = 0; i < logMessages.length; i++) {
if ((logMessages[i].indexOf(msgAA) != -1 && logMessages[i].indexOf(msgAB) != -1) ||
- logMessages[i].indexOf(msgB) != -1) {
+ logMessages[i].search(msgB) != -1) {
return true;
}
}
diff --git a/jstests/sharding/libs/sharded_transactions_helpers.js b/jstests/sharding/libs/sharded_transactions_helpers.js
index 0fdd971a834..38579460dce 100644
--- a/jstests/sharding/libs/sharded_transactions_helpers.js
+++ b/jstests/sharding/libs/sharded_transactions_helpers.js
@@ -1,3 +1,5 @@
+load("jstests/libs/logv2_helpers.js");
+
const kSnapshotErrors =
[ErrorCodes.SnapshotTooOld, ErrorCodes.SnapshotUnavailable, ErrorCodes.StaleChunkHistory];
@@ -97,6 +99,17 @@ function assertNoSuchTransactionOnConn(conn, lsid, txnNumber) {
function waitForFailpoint(hitFailpointStr, numTimes, timeout) {
// Don't run the hang analyzer because we don't expect waitForFailpoint() to always succeed.
+ if (isJsonLogNoConn()) {
+ const hitFailpointRe = /Hit (\w+) failpoint/;
+ const hitRe = /Hit (\w+)/;
+ const matchHitFailpoint = hitFailpointStr.match(hitFailpointRe);
+ const matchHit = hitFailpointStr.match(hitRe);
+ if (matchHitFailpoint) {
+ hitFailpointStr = `(Hit .+ failpoint.*${matchHitFailpoint[1]}|${hitFailpointStr})`;
+ } else {
+ hitFailpointStr = `(Hit .+.*${matchHit[1]}|${hitFailpointStr})`;
+ }
+ }
assert.soon(
function() {
const re = new RegExp(hitFailpointStr, 'g' /* find all occurrences */);
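
Illustrative sketch (not part of the patch): how the rewrite above turns a caller's text-log phrase into an alternation matching either format. The logv2 fragment is hypothetical.

    let hitFailpointStr = "Hit hangBeforeWritingDecision failpoint";  // caller's text form
    const m = hitFailpointStr.match(/Hit (\w+) failpoint/);
    hitFailpointStr = `(Hit .+ failpoint.*${m[1]}|${hitFailpointStr})`;
    // Matches the old text line and a logv2 line shaped like:
    //   ...Hit {name} failpoint","attr":{"name":"hangBeforeWritingDecision"...
    print(new RegExp(hitFailpointStr).test(
        'Hit {name} failpoint","attr":{"name":"hangBeforeWritingDecision"'));  // true
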
diff --git a/jstests/sharding/read_after_optime.js b/jstests/sharding/read_after_optime.js
index 11be1022efd..e5ebe321940 100644
--- a/jstests/sharding/read_after_optime.js
+++ b/jstests/sharding/read_after_optime.js
@@ -1,4 +1,5 @@
// Test read after opTime functionality with maxTimeMS on config servers (CSRS only)`.
+load("jstests/libs/logv2_helpers.js");
(function() {
'use strict';
@@ -36,10 +37,15 @@ assert.commandFailedWithCode(
ErrorCodes.MaxTimeMSExpired);
var msg = 'Command on database local timed out waiting for read concern to be satisfied.';
+if (isJsonLogNoConn()) {
+ msg =
+ /Command on database {request_getDatabase} timed out waiting for read concern to be satisfied.*"request_getDatabase":"local"/;
+}
+
assert.soon(function() {
var logMessages = assert.commandWorked(primaryConn.adminCommand({getLog: 'global'})).log;
for (var i = 0; i < logMessages.length; i++) {
- if (logMessages[i].indexOf(msg) != -1) {
+ if (logMessages[i].search(msg) != -1) {
return true;
}
}
diff --git a/jstests/sharding/read_write_concern_defaults_application.js b/jstests/sharding/read_write_concern_defaults_application.js
index 6b7e6b58c2f..72fec834018 100644
--- a/jstests/sharding/read_write_concern_defaults_application.js
+++ b/jstests/sharding/read_write_concern_defaults_application.js
@@ -24,7 +24,6 @@
* requires_fcv_44,
* requires_majority_read_concern,
* requires_profiling,
- * requires_text_logs,
* uses_transactions,
* ]
*/
@@ -32,6 +31,7 @@
"use strict";
load('jstests/libs/profiler.js');
+load("jstests/libs/logv2_helpers.js");
let db = "test";
let coll = "foo";
@@ -671,6 +671,19 @@ let setDefaultRWConcernActualTestCase = {
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
function createLogLineRegularExpressionForTestCase(test, cmdName, targetId, explicitRWC) {
let expectedProvenance = explicitRWC ? "clientSupplied" : "customDefault";
+ if (isJsonLogNoConn()) {
+ let pattern = `"command":{"${cmdName}"`;
+ pattern += `.*"comment":"${targetId}"`;
+ if (test.checkReadConcern) {
+ pattern += `.*"readConcern":{"level":"majority","provenance":"${expectedProvenance}"}`;
+ }
+ if (test.checkWriteConcern) {
+ pattern += `.*"writeConcern":{"w":"majority","wtimeout":1234567,"provenance":"${
+ expectedProvenance}"}`;
+ }
+ return new RegExp(pattern);
+ }
+
let pattern = `command: ${cmdName} `;
pattern += `.* comment: "${targetId}"`;
if (test.checkReadConcern) {
diff --git a/jstests/sharding/shard_identity_rollback.js b/jstests/sharding/shard_identity_rollback.js
index 9c5dd81da1b..912645b875d 100644
--- a/jstests/sharding/shard_identity_rollback.js
+++ b/jstests/sharding/shard_identity_rollback.js
@@ -85,8 +85,7 @@ jsTest.log("Waiting for original primary to rollback and shut down");
// Wait until the node shuts itself down during the rollback. We will hit the first assertion if
// we rollback using 'recoverToStableTimestamp' and the second if using 'rollbackViaRefetch'.
assert.soon(() => {
- return (rawMongoProgramOutput().indexOf("Fatal Assertion 50712") !== -1 ||
- rawMongoProgramOutput().indexOf("Fatal Assertion 40498") !== -1);
+ return (rawMongoProgramOutput().search(/Fatal Assertion.*(40498|50712)/) !== -1);
});
// Restart the original primary again. This time, the shardIdentity document should already be
diff --git a/jstests/sharding/time_zone_info_mongos.js b/jstests/sharding/time_zone_info_mongos.js
index 5ed55fdf879..6fa1e7c56cc 100644
--- a/jstests/sharding/time_zone_info_mongos.js
+++ b/jstests/sharding/time_zone_info_mongos.js
@@ -23,7 +23,7 @@ assert.eq(mongosCfg.parsed.processManagement.timeZoneInfo, tzGoodInfo);
// Test that a bad timezone file causes mongoS startup to fail.
let conn = MongoRunner.runMongos({configdb: st.configRS.getURL(), timeZoneInfo: tzBadInfo});
assert.eq(conn, null, "expected launching mongos with bad timezone rules to fail");
-assert.neq(-1, rawMongoProgramOutput().indexOf("Fatal assertion 40475"));
+assert.neq(-1, rawMongoProgramOutput().search(/Fatal assertion.*40475/));
// Test that a non-existent timezone directory causes mongoS startup to fail.
conn = MongoRunner.runMongos({configdb: st.configRS.getURL(), timeZoneInfo: tzNoInfo});
diff --git a/src/mongo/db/s/transaction_coordinator.cpp b/src/mongo/db/s/transaction_coordinator.cpp
index 03c21e4ce5f..c6ceaede4e5 100644
--- a/src/mongo/db/s/transaction_coordinator.cpp
+++ b/src/mongo/db/s/transaction_coordinator.cpp
@@ -491,7 +491,7 @@ void TransactionCoordinator::_logSlowTwoPhaseCommit(
duration_cast<Milliseconds>(
singleTransactionCoordinatorStats.getTwoPhaseCommitDuration(tickSource, curTick)));
- LOGV2(51804, "two-phsase commit", attrs);
+ LOGV2(51804, "two-phase commit", attrs);
} else {
LOGV2(22448,
diff --git a/src/mongo/shell/check_log.js b/src/mongo/shell/check_log.js
index cf514536f99..db9e830219f 100644
--- a/src/mongo/shell/check_log.js
+++ b/src/mongo/shell/check_log.js
@@ -165,6 +165,30 @@ checkLog = (function() {
: `{ ${serialized.join(', ')} }`);
};
+ /**
+ * Format a value as JSON the way the server's log file does, with no extra spaces.
+ */
+ const formatAsJsonLogLine = function(value, escapeStrings, toDecimal) {
+ if (typeof value === "string") {
+ return (escapeStrings ? `"${value}"` : value);
+ } else if (typeof value === "number") {
+ return (Number.isInteger(value) && toDecimal ? value.toFixed(1) : value);
+ } else if (value instanceof NumberLong) {
+ return `${value}`.match(/NumberLong..(.*)../m)[1];
+ } else if (typeof value !== "object") {
+ return value;
+ } else if (Object.keys(value).length === 0) {
+ return Array.isArray(value) ? "[]" : "{}";
+ }
+ let serialized = [];
+ escapeStrings = toDecimal = true;
+ for (let fieldName in value) {
+ const valueStr = formatAsJsonLogLine(value[fieldName], escapeStrings, toDecimal);
+ serialized.push(Array.isArray(value) ? valueStr : `"${fieldName}":${valueStr}`);
+ }
+ return (Array.isArray(value) ? `[${serialized.join(',')}]` : `{${serialized.join(',')}}`);
+ };
+
return {
getGlobalLog: getGlobalLog,
checkContainsOnce: checkContainsOnce,
@@ -172,7 +196,8 @@ checkLog = (function() {
contains: contains,
containsWithCount: containsWithCount,
containsWithAtLeastCount: containsWithAtLeastCount,
- formatAsLogLine: formatAsLogLine
+ formatAsLogLine: formatAsLogLine,
+ formatAsJsonLogLine: formatAsJsonLogLine
};
})();
})();
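
Illustrative sketch (not part of the patch): the two serializers side by side, runnable in the mongo shell, using string-valued fields; output shapes are shown in the comments.

    const doc = {find: "coll", comment: "c1"};
    print(checkLog.formatAsLogLine(doc));      // { find: "coll", comment: "c1" }
    print(checkLog.formatAsJsonLogLine(doc));  // {"find":"coll","comment":"c1"}
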
diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js
index 9d4b6ea5a09..1160d4b454c 100644
--- a/src/mongo/shell/replsettest.js
+++ b/src/mongo/shell/replsettest.js
@@ -707,8 +707,8 @@ var ReplSetTest = function(opts) {
// Confirm that the node is unable to recover after rolling back.
checkLog.contains(
connToCheck,
- "remote oplog does not contain entry with optime matching our required optime ",
- 60 * 1000);
+ "remote oplog does not contain entry with optime matching our required optime",
+ 120 * 1000);
} catch (checkLogEx) {
throw originalEx;
}