author     Ivan Fefer <ivan.fefer@mongodb.com>  2022-09-05 13:37:04 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-09-07 10:51:15 +0000
commit     a7cda0d9cf32b7074b4c9a2efe6e6590b4dab564 (patch)
tree       d7be9f0105c6a982ed0372f2cfe60712ad7d04cd /jstests/replsets
parent     7b8c49b09f2352b2573ebae2735ec2eb5b705aaf (diff)
SERVER-68871 Remove 'isJsonLogNoConn()' and 'isJsonLog()' functions because they always return true. Refactor the callers.
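The refactor repeated across the files below is the same: server logs are now always emitted in the structured (JSON) format, so the plain-text branch of each log assertion is dead code. The isJsonLog()/isJsonLogNoConn() guard is removed and only the JSON-format expectation is kept. A minimal before/after sketch of that caller pattern, taken from the shape of the hunks below (the `conn` variable here is a stand-in, not a name from the tree):

    // Before: branch on the log-format helper, which always returned true.
    if (isJsonLog(conn)) {
        checkLog.contains(conn, "Transition to primary complete");
    } else {
        checkLog.contains(conn, "transition to primary complete");
    }

    // After: assert directly against the structured-log message text.
    checkLog.contains(conn, "Transition to primary complete");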
Diffstat (limited to 'jstests/replsets')
-rw-r--r--  jstests/replsets/avg_num_catchup_ops.js                            | 15
-rw-r--r--  jstests/replsets/catchup.js                                        |  7
-rw-r--r--  jstests/replsets/collection_clone_resume_after_network_error.js    |  7
-rw-r--r--  jstests/replsets/create_drop_database_different_casing.js          |  7
-rw-r--r--  jstests/replsets/drop_collections_two_phase_rename_drop_target.js  |  7
-rw-r--r--  jstests/replsets/drop_databases_two_phase.js                       | 14
-rw-r--r--  jstests/replsets/initial_sync_applier_error.js                     |  7
-rw-r--r--  jstests/replsets/initial_sync_drop_collection.js                   |  1
-rw-r--r--  jstests/replsets/initial_sync_rename_collection.js                 | 28
-rw-r--r--  jstests/replsets/initial_sync_test_fixture_test.js                 | 14
-rw-r--r--  jstests/replsets/libs/election_handoff.js                          | 28
-rw-r--r--  jstests/replsets/mr_nonrepl_coll_in_local_db.js                    |  9
-rw-r--r--  jstests/replsets/no_flapping_during_network_partition.js           | 13
-rw-r--r--  jstests/replsets/read_after_optime.js                              | 12
14 files changed, 36 insertions, 133 deletions
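Several files also tighten their assertions from substring matching on formatted text to checkLog.containsJson(), which looks a structured log entry up by its numeric id and matches on its attributes; id 20336 below corresponds to the dropDatabase-finished message that the removed plain-text assertions used to match. A short usage sketch, with the connection and db-name variables standing in for the ones defined in those tests:

    // Wait for the structured "dropDatabase finished" entry (id 20336) for the target db.
    checkLog.containsJson(primary, 20336, {"db": dbNameUpper});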
diff --git a/jstests/replsets/avg_num_catchup_ops.js b/jstests/replsets/avg_num_catchup_ops.js
index f9d077eb99e..d0ff79335b9 100644
--- a/jstests/replsets/avg_num_catchup_ops.js
+++ b/jstests/replsets/avg_num_catchup_ops.js
@@ -5,7 +5,6 @@
(function() {
"use strict";
-load("jstests/libs/logv2_helpers.js");
load("jstests/libs/write_concern_util.js");
load("jstests/replsets/libs/election_metrics.js");
load("jstests/replsets/rslib.js");
@@ -30,11 +29,7 @@ restartServerReplication(stepUpResults.oldSecondaries);
// Block until the primary finishes drain mode.
assert.eq(stepUpResults.newPrimary, rst.getPrimary());
// Wait until the new primary completes the transition to primary and writes a no-op.
-if (isJsonLog(stepUpResults.newPrimary)) {
- checkLog.contains(stepUpResults.newPrimary, "Transition to primary complete");
-} else {
- checkLog.contains(stepUpResults.newPrimary, "transition to primary complete");
-}
+checkLog.contains(stepUpResults.newPrimary, "Transition to primary complete");
let testNodeReplSetGetStatus =
assert.commandWorked(stepUpResults.newPrimary.adminCommand({replSetGetStatus: 1}));
@@ -62,11 +57,7 @@ rst.awaitReplication();
stepUpResults = stopReplicationAndEnforceNewPrimaryToCatchUp(rst, testNode);
restartServerReplication(stepUpResults.oldSecondaries);
assert.eq(stepUpResults.newPrimary, rst.getPrimary());
-if (isJsonLog(stepUpResults.newPrimary)) {
- checkLog.contains(stepUpResults.newPrimary, "Transition to primary complete");
-} else {
- checkLog.contains(stepUpResults.newPrimary, "transition to primary complete");
-}
+checkLog.contains(stepUpResults.newPrimary, "Transition to primary complete");
rst.awaitReplication();
testNodeServerStatus =
@@ -80,4 +71,4 @@ assert.eq(testNodeServerStatus.electionMetrics.numCatchUps, 2);
assert.eq(testNodeServerStatus.electionMetrics.averageCatchUpOps, 3.5);
rst.stopSet();
-})(); \ No newline at end of file
+})();
diff --git a/jstests/replsets/catchup.js b/jstests/replsets/catchup.js
index 586b403d08b..a502a31c0b4 100644
--- a/jstests/replsets/catchup.js
+++ b/jstests/replsets/catchup.js
@@ -3,7 +3,6 @@
(function() {
"use strict";
-load("jstests/libs/logv2_helpers.js");
load("jstests/libs/write_concern_util.js");
load("jstests/replsets/libs/election_metrics.js");
load("jstests/replsets/rslib.js");
@@ -119,11 +118,7 @@ restartServerReplication(stepUpResults.oldSecondaries);
assert.eq(stepUpResults.newPrimary, rst.getPrimary());
// Wait until the new primary completes the transition to primary and writes a no-op.
-if (isJsonLog(stepUpResults.newPrimary)) {
- checkLog.contains(stepUpResults.newPrimary, "Transition to primary complete");
-} else {
- checkLog.contains(stepUpResults.newPrimary, "transition to primary complete");
-}
+checkLog.contains(stepUpResults.newPrimary, "Transition to primary complete");
// Check that the new primary's term has been updated because of the no-op.
assert.eq(getLatestOp(stepUpResults.newPrimary).t, stepUpResults.latestOpOnNewPrimary.t + 1);
diff --git a/jstests/replsets/collection_clone_resume_after_network_error.js b/jstests/replsets/collection_clone_resume_after_network_error.js
index c39384223dc..0a800befc90 100644
--- a/jstests/replsets/collection_clone_resume_after_network_error.js
+++ b/jstests/replsets/collection_clone_resume_after_network_error.js
@@ -11,7 +11,6 @@
load("jstests/replsets/rslib.js"); // For setLogVerbosity()
load("jstests/libs/fail_point_util.js");
-load("jstests/libs/logv2_helpers.js");
// Verify the 'find' command received by the primary includes a resume token request.
function checkHasRequestResumeToken() {
@@ -27,11 +26,7 @@ function checkNoResumeAfter() {
// Verify the 'find' command received by the primary has resumeAfter set with the given recordId.
function checkHasResumeAfter(recordId) {
- if (isJsonLogNoConn()) {
- checkLog.contains(primary, `"$_resumeAfter":{"$recordId":${recordId}}`);
- } else {
- checkLog.contains(primary, "$_resumeAfter: { $recordId: " + recordId + " }");
- }
+ checkLog.contains(primary, `"$_resumeAfter":{"$recordId":${recordId}}`);
}
const beforeRetryFailPointName = "hangBeforeRetryingClonerStage";
diff --git a/jstests/replsets/create_drop_database_different_casing.js b/jstests/replsets/create_drop_database_different_casing.js
index 9b0e201cad1..76ed0d156ef 100644
--- a/jstests/replsets/create_drop_database_different_casing.js
+++ b/jstests/replsets/create_drop_database_different_casing.js
@@ -13,7 +13,6 @@
*
* @tags: [requires_replication]
*/
-load("jstests/libs/logv2_helpers.js");
(function() {
'use strict';
@@ -47,11 +46,7 @@ assert.commandFailedWithCode(lowerDB.createCollection("test"), ErrorCodes.Databa
rst.awaitReplication();
failPoint.off();
-if (isJsonLog(primary)) {
- checkLog.containsJson(primary, 20336, {"db": dbNameUpper});
-} else {
- checkLog.contains(primary, "dropDatabase " + dbNameUpper + " - finished");
-}
+checkLog.containsJson(primary, 20336, {"db": dbNameUpper});
assert.commandWorked(lowerDB.createCollection("test"));
awaitDropUpper();
diff --git a/jstests/replsets/drop_collections_two_phase_rename_drop_target.js b/jstests/replsets/drop_collections_two_phase_rename_drop_target.js
index 9aa9a15c9c1..ee8d25cd512 100644
--- a/jstests/replsets/drop_collections_two_phase_rename_drop_target.js
+++ b/jstests/replsets/drop_collections_two_phase_rename_drop_target.js
@@ -6,7 +6,6 @@
(function() {
'use strict';
-load("jstests/libs/logv2_helpers.js");
load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
// Return a list of all indexes for a given collection. Use 'args' as the
@@ -103,11 +102,7 @@ try {
// Confirm in the logs that the renameCollection dropped the target collection on the
// secondary using two phase collection drop.
- if (isJsonLog(secondary)) {
- checkLog.containsJson(secondary, 20315, {namespace: toColl.getFullName()});
- } else {
- checkLog.contains(secondary, new RegExp('dropCollection:.*' + toColl.getFullName()));
- }
+ checkLog.containsJson(secondary, 20315, {namespace: toColl.getFullName()});
// Rename target collection back to source collection. This helps to ensure the collection
// metadata is updated correctly on both primary and secondary.
diff --git a/jstests/replsets/drop_databases_two_phase.js b/jstests/replsets/drop_databases_two_phase.js
index c3cb8ead592..3f8d27742ec 100644
--- a/jstests/replsets/drop_databases_two_phase.js
+++ b/jstests/replsets/drop_databases_two_phase.js
@@ -19,7 +19,6 @@
load('jstests/replsets/libs/two_phase_drops.js'); // For TwoPhaseDropCollectionTest.
load("jstests/replsets/rslib.js");
-load("jstests/libs/logv2_helpers.js");
load("jstests/libs/write_concern_util.js");
// Returns a list of all collections in a given database. Use 'args' as the
@@ -155,15 +154,10 @@ jsTestLog('Waiting for dropDatabase command on ' + primary.host + ' to complete.
var exitCode = dropDatabaseProcess();
let db = primary.getDB(dbNameToDrop);
-if (isJsonLog(db.getMongo())) {
- checkLog.contains(db.getMongo(),
- `dropDatabase - dropping collection","attr":{"db":"${
- dbNameToDrop}","namespace":"${dbNameToDrop}.${collNameToDrop}"`);
- checkLog.containsJson(db.getMongo(), 20336, {"db": "dbToDrop"});
-} else {
- checkLog.contains(db.getMongo(), "dropping collection: " + dbNameToDrop + "." + collNameToDrop);
- checkLog.contains(db.getMongo(), "dropped 1 collection(s)");
-}
+checkLog.contains(db.getMongo(),
+ `dropDatabase - dropping collection","attr":{"db":"${
+ dbNameToDrop}","namespace":"${dbNameToDrop}.${collNameToDrop}"`);
+checkLog.containsJson(db.getMongo(), 20336, {"db": "dbToDrop"});
assert.eq(0, exitCode, 'dropDatabase command on ' + primary.host + ' failed.');
jsTestLog('Completed dropDatabase command on ' + primary.host);
diff --git a/jstests/replsets/initial_sync_applier_error.js b/jstests/replsets/initial_sync_applier_error.js
index e880c739ef1..951002b0bce 100644
--- a/jstests/replsets/initial_sync_applier_error.js
+++ b/jstests/replsets/initial_sync_applier_error.js
@@ -12,7 +12,6 @@
(function() {
"use strict";
load("jstests/libs/fail_point_util.js");
-load("jstests/libs/logv2_helpers.js");
var name = 'initial_sync_applier_error';
var replSet = new ReplSetTest({
@@ -43,11 +42,7 @@ var newCollName = name + '_2';
assert.commandWorked(coll.renameCollection(newCollName, true));
failPoint.off();
-if (isJsonLog(secondary)) {
- checkLog.contains(secondary, 'Initial sync done');
-} else {
- checkLog.contains(secondary, 'initial sync done');
-}
+checkLog.contains(secondary, 'Initial sync done');
replSet.awaitReplication();
replSet.awaitSecondaryNodes();
diff --git a/jstests/replsets/initial_sync_drop_collection.js b/jstests/replsets/initial_sync_drop_collection.js
index 9d82c45265f..8bf244ce4e2 100644
--- a/jstests/replsets/initial_sync_drop_collection.js
+++ b/jstests/replsets/initial_sync_drop_collection.js
@@ -8,7 +8,6 @@
load("jstests/libs/fail_point_util.js");
load('jstests/replsets/libs/two_phase_drops.js');
load("jstests/libs/uuid_util.js");
-load("jstests/libs/logv2_helpers.js");
// Set up replica set. Disallow chaining so nodes always sync from primary.
const testName = "initial_sync_drop_collection";
diff --git a/jstests/replsets/initial_sync_rename_collection.js b/jstests/replsets/initial_sync_rename_collection.js
index d77f8e87808..a5d21e3e968 100644
--- a/jstests/replsets/initial_sync_rename_collection.js
+++ b/jstests/replsets/initial_sync_rename_collection.js
@@ -8,7 +8,6 @@
load("jstests/libs/fail_point_util.js");
load("jstests/libs/uuid_util.js");
load('jstests/replsets/libs/two_phase_drops.js');
-load("jstests/libs/logv2_helpers.js");
// Set up replica set. Disallow chaining so nodes always sync from primary.
const testName = "initial_sync_rename_collection";
@@ -164,9 +163,8 @@ runRenameTest({
renameAcrossDBs: true
});
-const expectedLogFor5and7 = isJsonLogNoConn()
- ? '`Sync process retrying cloner stage due to error","attr":{"cloner":"CollectionCloner","stage":"query","error":{"code":175,"codeName":"QueryPlanKilled","errmsg":"collection renamed from \'${nss}\' to \'${rnss}\'. UUID ${uuid}"}}}`'
- : "`Sync process retrying CollectionCloner stage query due to QueryPlanKilled: collection renamed from '${nss}' to '${rnss}'. UUID ${uuid}`";
+const expectedLogFor5and7 =
+ '`Sync process retrying cloner stage due to error","attr":{"cloner":"CollectionCloner","stage":"query","error":{"code":175,"codeName":"QueryPlanKilled","errmsg":"collection renamed from \'${nss}\' to \'${rnss}\'. UUID ${uuid}"}}}`';
jsTestLog("[5] Testing rename between getMores.");
runRenameTest({
@@ -176,27 +174,17 @@ runRenameTest({
});
// A cross-DB rename will appear as a drop in the context of the source DB.
-let expectedLogFor6and8 =
- "`CollectionCloner ns: '${nss}' uuid: UUID(\"${uuid}\") stopped because collection was dropped on source.`";
-
-if (isJsonLogNoConn()) {
- // Double escape the backslash as eval will do unescaping
- expectedLogFor6and8 =
- '`CollectionCloner stopped because collection was dropped on source","attr":{"namespace":"${nss}","uuid":{"uuid":{"$uuid":"${uuid}"}}}}`';
-}
+// Double escape the backslash as eval will do unescaping
+const expectedLogFor6and8 =
+ '`CollectionCloner stopped because collection was dropped on source","attr":{"namespace":"${nss}","uuid":{"uuid":{"$uuid":"${uuid}"}}}}`';
// We don't support 4.2 style two-phase drops with EMRC=false - in that configuration, the
// collection will instead be renamed to a <db>.system.drop.* namespace before being dropped. Since
// the cloner queries collection by UUID, it will observe the first drop phase as a rename.
// We still want to check that initial sync succeeds in such a case.
if (TwoPhaseDropCollectionTest.supportsDropPendingNamespaces(replTest)) {
- if (isJsonLogNoConn()) {
- expectedLogFor6and8 =
- '`Sync process retrying cloner stage due to error","attr":{"cloner":"CollectionCloner","stage":"query","error":{"code":175,"codeName":"QueryPlanKilled","errmsg":"collection renamed from \'${nss}\' to \'${dropPendingNss}\'. UUID ${uuid}`';
- } else {
- expectedLogFor6and8 =
- "`Sync process retrying CollectionCloner stage query due to QueryPlanKilled: collection renamed from '${nss}' to '${dropPendingNss}'. UUID ${uuid}`";
- }
+ expectedLogFor6and8 =
+ '`Sync process retrying cloner stage due to error","attr":{"cloner":"CollectionCloner","stage":"query","error":{"code":175,"codeName":"QueryPlanKilled","errmsg":"collection renamed from \'${nss}\' to \'${dropPendingNss}\'. UUID ${uuid}`';
}
jsTestLog("[6] Testing cross-DB rename between getMores.");
@@ -223,4 +211,4 @@ runRenameTest({
});
replTest.stopSet();
-})(); \ No newline at end of file
+})();
diff --git a/jstests/replsets/initial_sync_test_fixture_test.js b/jstests/replsets/initial_sync_test_fixture_test.js
index 400f0c9c028..c72ac2430ac 100644
--- a/jstests/replsets/initial_sync_test_fixture_test.js
+++ b/jstests/replsets/initial_sync_test_fixture_test.js
@@ -20,7 +20,6 @@
load("jstests/core/txns/libs/prepare_helpers.js");
load("jstests/replsets/libs/initial_sync_test.js");
-load("jstests/libs/logv2_helpers.js");
/**
* Helper function to check that specific messages appeared or did not appear in the logs.
@@ -52,19 +51,12 @@ function checkLogForGetTimestampMsg(node, timestampName, timestamp, contains) {
* UUID to make sure that it corresponds to the expected collection.
*/
function checkLogForCollectionClonerMsg(node, commandName, dbname, contains, collUUID) {
- let msg =
- "Collection Cloner scheduled a remote command on the " + dbname + " db: { " + commandName;
-
- if (isJsonLog(node)) {
- msg = 'Collection Cloner scheduled a remote command","attr":{"stage":"' + dbname +
- " db: { " + commandName;
- }
+ let msg = 'Collection Cloner scheduled a remote command","attr":{"stage":"' + dbname +
+ " db: { " + commandName;
if (commandName === "listIndexes" && contains) {
msg += ": " + collUUID;
- if (isJsonLog(node)) {
- msg = msg.replace('("', '(\\"').replace('")', '\\")');
- }
+ msg = msg.replace('("', '(\\"').replace('")', '\\")');
}
checkLogForMsg(node, msg, contains);
diff --git a/jstests/replsets/libs/election_handoff.js b/jstests/replsets/libs/election_handoff.js
index 334def47df7..347d8798ccc 100644
--- a/jstests/replsets/libs/election_handoff.js
+++ b/jstests/replsets/libs/election_handoff.js
@@ -6,7 +6,6 @@
var ElectionHandoffTest = (function() {
load("jstests/replsets/rslib.js");
- load("jstests/libs/logv2_helpers.js");
const kStepDownPeriodSecs = 30;
const kSIGTERM = 15;
@@ -76,26 +75,13 @@ var ElectionHandoffTest = (function() {
// The checkLog() function blocks until the log line appears.
checkLog.contains(expectedCandidate, "Starting an election due to step up request");
- if (isJsonLog(expectedCandidate)) {
- // If there are only two nodes in the set, verify that the old primary voted "yes".
- if (numNodes === 2) {
- checkLog.contains(
- expectedCandidate,
- `Skipping dry run and running for election","attr":{"newTerm":${term + 1}}}`);
- checkLog.checkContainsOnceJson(
- expectedCandidate,
- 51799,
- {"term": term + 1, vote: "yes", "from": primary.host});
- }
- } else {
- // If there are only two nodes in the set, verify that the old primary voted "yes".
- if (numNodes === 2) {
- checkLog.contains(expectedCandidate,
- `skipping dry run and running for election in term ${term + 1}`);
- checkLog.contains(
- expectedCandidate,
- `VoteRequester(term ${term + 1}) received a yes vote from ${primary.host}`);
- }
+ // If there are only two nodes in the set, verify that the old primary voted "yes".
+ if (numNodes === 2) {
+ checkLog.contains(
+ expectedCandidate,
+ `Skipping dry run and running for election","attr":{"newTerm":${term + 1}}}`);
+ checkLog.checkContainsOnceJson(
+ expectedCandidate, 51799, {"term": term + 1, vote: "yes", "from": primary.host});
}
rst.awaitNodesAgreeOnPrimary();
diff --git a/jstests/replsets/mr_nonrepl_coll_in_local_db.js b/jstests/replsets/mr_nonrepl_coll_in_local_db.js
index a3a395f3c1f..933533046e4 100644
--- a/jstests/replsets/mr_nonrepl_coll_in_local_db.js
+++ b/jstests/replsets/mr_nonrepl_coll_in_local_db.js
@@ -6,7 +6,6 @@
// We verify this requirement by running a map-reduce, examining the logs to find the names of
// all collections created, and checking the oplog for entries logging the creation of each of those
// collections.
-load("jstests/libs/logv2_helpers.js");
(function() {
"use strict";
@@ -46,13 +45,8 @@ assert.commandWorked(result);
const logLines = checkLog.getGlobalLog(primaryDB);
let createdCollections = [];
logLines.forEach(function(line) {
- let matchResult;
- if (isJsonLogNoConn()) {
- line.match(/createCollection: (.+) with/);
- } else {
- matchResult = line.match(/createCollection: .+ with.*"nss":"(.*)"/);
- }
- if (matchResult) {
+    const matchResult = line.match(/createCollection: (.+) with/);
+    if (matchResult) {
createdCollections.push(matchResult[1]);
}
});
diff --git a/jstests/replsets/no_flapping_during_network_partition.js b/jstests/replsets/no_flapping_during_network_partition.js
index 7b8f3e5df91..7cc8b876341 100644
--- a/jstests/replsets/no_flapping_during_network_partition.js
+++ b/jstests/replsets/no_flapping_during_network_partition.js
@@ -12,7 +12,6 @@
(function() {
"use strict";
-load("jstests/libs/logv2_helpers.js");
var name = "no_flapping_during_network_partition";
@@ -41,21 +40,13 @@ primary.disconnect(secondary);
jsTestLog("Wait long enough for the secondary to call for an election.");
checkLog.contains(secondary, "can see a healthy primary");
-if (isJsonLog(secondary)) {
- checkLog.contains(secondary, "Not running for primary");
-} else {
- checkLog.contains(secondary, "not running for primary");
-}
+checkLog.contains(secondary, "Not running for primary");
jsTestLog("Verify the primary and secondary do not change during the partition.");
assert.eq(primary, replTest.getPrimary());
assert.eq(secondary, replTest.getSecondary());
-if (isJsonLog(secondary)) {
- checkLog.contains(secondary, "Not running for primary");
-} else {
- checkLog.contains(secondary, "not running for primary");
-}
+checkLog.contains(secondary, "Not running for primary");
jsTestLog("Heal the partition.");
primary.reconnect(secondary);
diff --git a/jstests/replsets/read_after_optime.js b/jstests/replsets/read_after_optime.js
index 5673846d31d..619c2225bde 100644
--- a/jstests/replsets/read_after_optime.js
+++ b/jstests/replsets/read_after_optime.js
@@ -1,5 +1,4 @@
// Test read after opTime functionality with maxTimeMS.
-load("jstests/libs/logv2_helpers.js");
(function() {
"use strict";
@@ -37,14 +36,9 @@ var runTest = function(testDB, primaryConn) {
runTimeoutTest();
testDB.setLogLevel(0, 'command');
- var msg = 'Command on database ' + testDB.getName() +
- ' timed out waiting for read concern to be satisfied. Command:';
-
- if (isJsonLog(testDB.getMongo())) {
- msg =
- new RegExp(`Command timed out waiting for read concern to be satisfied.*"attr":{"db":"${
- testDB.getName()}",*`);
- }
+ const msg =
+ new RegExp(`Command timed out waiting for read concern to be satisfied.*"attr":{"db":"${
+ testDB.getName()}",*`);
checkLog.containsWithCount(testDB.getMongo(), msg, 1);
// Clear the log to not fill up the ramlog