author     Shreyas Kalyan <shreyas.kalyan@10gen.com>          2021-04-20 13:12:39 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>   2021-04-30 17:12:57 +0000
commit     1c2ae5a2203000534aae7f3fb5b6317b60a35d02 (patch)
tree       9db5efa36189b6cd2983b1bd103166b11a527144 /jstests/concurrency/fsm_libs
parent     5ff59681de1f16392dd38658598996861b09d44e (diff)
download   mongo-1c2ae5a2203000534aae7f3fb5b6317b60a35d02.tar.gz
SERVER-55963 Use "denylist" in replication subsystems
Diffstat (limited to 'jstests/concurrency/fsm_libs')
-rw-r--r--  jstests/concurrency/fsm_libs/cluster.js |  4
-rw-r--r--  jstests/concurrency/fsm_libs/runner.js  | 36
2 files changed, 20 insertions, 20 deletions
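The rename is mechanical, but it does change the public cleanup-option key accepted by the FSM runner: `dropDatabaseBlacklist` becomes `dropDatabaseDenylist`, and the value must still be an array. A minimal, hedged sketch of an options object that would pass the updated `validateCleanupOptions()` check shown in the diff below (the database name and the boolean values are illustrative, not taken from this commit):

```javascript
// Only these keys are accepted by validateCleanupOptions() after this change;
// the old 'dropDatabaseBlacklist' key would now be rejected as an unknown option.
var cleanupOptions = {
    dropDatabaseDenylist: ['my_suite_db'],  // hypothetical database name; must be an array
    keepExistingDatabases: false,           // illustrative value
    validateCollections: true               // illustrative value
};
```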
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js
index 0689938a823..8c3f58d43bb 100644
--- a/jstests/concurrency/fsm_libs/cluster.js
+++ b/jstests/concurrency/fsm_libs/cluster.js
@@ -542,7 +542,7 @@ var Cluster = function(options) {
         jsTest.log('Finished validating collections in ' + totalTime + ' ms, ' + phase);
     };
 
-    this.checkReplicationConsistency = function checkReplicationConsistency(dbBlacklist, phase) {
+    this.checkReplicationConsistency = function checkReplicationConsistency(dbDenylist, phase) {
         assert(initialized, 'cluster must be initialized first');
 
         if (!this.isReplication()) {
@@ -561,7 +561,7 @@ var Cluster = function(options) {
                    ' assumed to still be primary, ' + phase);
 
         // Compare the dbhashes of the primary and secondaries.
-        rst.checkReplicatedDataHashes(phase, dbBlacklist);
+        rst.checkReplicatedDataHashes(phase, dbDenylist);
         var totalTime = Date.now() - startTime;
         jsTest.log('Finished consistency checks of replica set with ' + primary.host +
                    ' as primary in ' + totalTime + ' ms, ' + phase);
diff --git a/jstests/concurrency/fsm_libs/runner.js b/jstests/concurrency/fsm_libs/runner.js
index e8bbf4424dd..0b06253d54f 100644
--- a/jstests/concurrency/fsm_libs/runner.js
+++ b/jstests/concurrency/fsm_libs/runner.js
@@ -123,7 +123,7 @@ var runner = (function() {
     }
 
     function validateCleanupOptions(options) {
-        var allowedKeys = ['dropDatabaseBlacklist', 'keepExistingDatabases', 'validateCollections'];
+        var allowedKeys = ['dropDatabaseDenylist', 'keepExistingDatabases', 'validateCollections'];
 
         Object.keys(options).forEach(function(option) {
             assert.contains(option,
@@ -132,9 +132,9 @@
                             '; valid options are: ' + tojson(allowedKeys));
         });
 
-        if (typeof options.dropDatabaseBlacklist !== 'undefined') {
-            assert(Array.isArray(options.dropDatabaseBlacklist),
-                   'expected dropDatabaseBlacklist to be an array');
+        if (typeof options.dropDatabaseDenylist !== 'undefined') {
+            assert(Array.isArray(options.dropDatabaseDenylist),
+                   'expected dropDatabaseDenylist to be an array');
         }
 
         if (typeof options.keepExistingDatabases !== 'undefined') {
@@ -245,12 +245,12 @@
         });
     }
 
-    function dropAllDatabases(db, blacklist) {
+    function dropAllDatabases(db, denylist) {
         var res = db.adminCommand('listDatabases');
         assert.commandWorked(res);
 
         res.databases.forEach(function(dbInfo) {
-            if (!Array.contains(blacklist, dbInfo.name)) {
+            if (!Array.contains(denylist, dbInfo.name)) {
                 assert.commandWorked(db.getSiblingDB(dbInfo.name).dropDatabase());
             }
         });
@@ -425,7 +425,7 @@
     }
 
     function cleanupWorkload(
-        workload, context, cluster, errors, header, dbHashBlacklist, cleanupOptions) {
+        workload, context, cluster, errors, header, dbHashDenylist, cleanupOptions) {
         // Returns true if the workload's teardown succeeds and false if the workload's
         // teardown fails.
 
@@ -434,7 +434,7 @@
         try {
             // Ensure that all data has replicated correctly to the secondaries before calling the
             // workload's teardown method.
-            cluster.checkReplicationConsistency(dbHashBlacklist, phase);
+            cluster.checkReplicationConsistency(dbHashDenylist, phase);
         } catch (e) {
             errors.push(new WorkloadFailure(
                 e.toString(), e.stack, 'main', header + ' checking consistency on secondaries'));
@@ -494,7 +494,7 @@
                                   executionOptions,
                                   errors,
                                   maxAllowedThreads,
-                                  dbHashBlacklist,
+                                  dbHashDenylist,
                                   configServerData,
                                   cleanupOptions) {
         var cleanup = [];
@@ -568,7 +568,7 @@
                                                               cluster,
                                                               errors,
                                                               'Foreground',
-                                                              dbHashBlacklist,
+                                                              dbHashDenylist,
                                                               cleanupOptions));
 
             teardownFailed = cleanupResults.some(success => (success === false));
@@ -587,7 +587,7 @@
                 throwError(errors);
 
             // Ensure that all operations replicated correctly to the secondaries.
-            cluster.checkReplicationConsistency(dbHashBlacklist,
+            cluster.checkReplicationConsistency(dbHashDenylist,
                                                 'after workload-group teardown and data clean-up');
         }
 
@@ -635,17 +635,17 @@
         // to avoid having too many open files.
 
         // List of DBs that will not be dropped.
-        var dbBlacklist = ['admin', 'config', 'local', '$external'];
+        var dbDenylist = ['admin', 'config', 'local', '$external'];
         // List of DBs that dbHash is not run on.
-        var dbHashBlacklist = ['local'];
+        var dbHashDenylist = ['local'];
 
-        if (cleanupOptions.dropDatabaseBlacklist) {
-            dbBlacklist.push(...cleanupOptions.dropDatabaseBlacklist);
-            dbHashBlacklist.push(...cleanupOptions.dropDatabaseBlacklist);
+        if (cleanupOptions.dropDatabaseDenylist) {
+            dbDenylist.push(...cleanupOptions.dropDatabaseDenylist);
+            dbHashDenylist.push(...cleanupOptions.dropDatabaseDenylist);
         }
 
         if (!cleanupOptions.keepExistingDatabases) {
-            dropAllDatabases(cluster.getDB('test'), dbBlacklist);
+            dropAllDatabases(cluster.getDB('test'), dbDenylist);
         }
 
         var maxAllowedThreads = 100 * executionOptions.threadMultiplier;
@@ -678,7 +678,7 @@
                                     executionOptions,
                                     errors,
                                     maxAllowedThreads,
-                                    dbHashBlacklist,
+                                    dbHashDenylist,
                                     configServerData,
                                     cleanupOptions);
         });
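For reference, a self-contained sketch of how the renamed lists fit together after this change. The denylist contents and the merge logic mirror the runner.js hunks above; `namesToDrop()` is a hypothetical stand-in for `dropAllDatabases()`, which in the real code issues `dropDatabase` commands against the cluster:

```javascript
// Stand-in for dropAllDatabases(db, denylist): the real code drops every listed
// database whose name is not on the denylist (see the @@ -245 hunk above).
function namesToDrop(allDbNames, denylist) {
    return allDbNames.filter(name => !denylist.includes(name));
}

var cleanupOptions = {dropDatabaseDenylist: ['suite_scratch_db']};  // hypothetical option value

var dbDenylist = ['admin', 'config', 'local', '$external'];  // never dropped
var dbHashDenylist = ['local'];                              // excluded from dbHash checks

// Suite-supplied databases are both protected from dropping and skipped by dbHash,
// exactly as in the @@ -635 hunk above.
if (cleanupOptions.dropDatabaseDenylist) {
    dbDenylist.push(...cleanupOptions.dropDatabaseDenylist);
    dbHashDenylist.push(...cleanupOptions.dropDatabaseDenylist);
}

// toDrop ends up as ['test']; everything else is protected by the denylist.
// dbHashDenylist is later forwarded to cluster.checkReplicationConsistency(), which
// passes it through to rst.checkReplicatedDataHashes(phase, dbDenylist) in cluster.js.
var toDrop = namesToDrop(['admin', 'config', 'local', 'test', 'suite_scratch_db'], dbDenylist);
```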