summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorYu Jin Kang Park <yujin.kang@mongodb.com>2022-06-15 13:19:49 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-07-06 12:17:47 +0000
commit574d60b493f71e6433a784dc3a2a8fd3fed171a1 (patch)
tree0ec8447e2cf81fa33f7729f779afe649ab0f344f
parent02dfedb849374159219251422ada6035333e2c3b (diff)
downloadmongo-574d60b493f71e6433a784dc3a2a8fd3fed171a1.tar.gz
SERVER-55208: Remove nojournal,duroff and evergreen variant
-rw-r--r--buildscripts/packaging/msi/mongod.yaml4
-rw-r--r--buildscripts/resmokeconfig/suites/dbtest.yml1
-rw-r--r--buildscripts/resmokelib/core/programs.py1
-rwxr-xr-xbuildscripts/resmokelib/powercycle/powercycle.py6
-rw-r--r--buildscripts/resmokelib/run/__init__.py3
-rw-r--r--buildscripts/resmokelib/testing/fixtures/replicaset.py4
-rw-r--r--buildscripts/resmokelib/testing/fixtures/standalone.py9
-rw-r--r--buildscripts/tests/resmokelib/test_parser.py15
-rw-r--r--buildscripts/tests/test_burn_in_tags_evergreen.yml2
-rw-r--r--etc/evergreen.yml44
-rw-r--r--jstests/auth/curop_auth_info.js2
-rw-r--r--jstests/auth/currentop_cursors_auth.js2
-rw-r--r--jstests/auth/drop-user-transaction.js2
-rw-r--r--jstests/auth/impersonation-deny.js7
-rw-r--r--jstests/disk/wt_repair_corrupt_metadata.js51
-rw-r--r--jstests/disk/wt_table_checks.js2
-rw-r--r--jstests/libs/command_line/test_parsed_options.js6
-rw-r--r--jstests/libs/config_files/disable_dur.ini1
-rw-r--r--jstests/libs/config_files/disable_journal.ini1
-rw-r--r--jstests/libs/config_files/disable_nodur.ini1
-rw-r--r--jstests/libs/config_files/disable_nojournal.ini1
-rw-r--r--jstests/libs/config_files/enable_journal.json7
-rw-r--r--jstests/libs/config_files/implicitly_enable_dur.ini1
-rw-r--r--jstests/libs/config_files/implicitly_enable_journal.ini1
-rw-r--r--jstests/libs/config_files/implicitly_enable_nodur.ini1
-rw-r--r--jstests/libs/config_files/implicitly_enable_nojournal.ini1
-rw-r--r--jstests/noPassthrough/agg_explain_read_concern.js8
-rw-r--r--jstests/noPassthrough/agg_group.js2
-rw-r--r--jstests/noPassthrough/api_version_2_commands.js2
-rw-r--r--jstests/noPassthrough/api_version_parameters_shell.js2
-rw-r--r--jstests/noPassthrough/backup_restore_rolling.js7
-rw-r--r--jstests/noPassthrough/batched_multi_deletes.js3
-rw-r--r--jstests/noPassthrough/batched_multi_deletes_oplog.js3
-rw-r--r--jstests/noPassthrough/batched_multi_deletes_params.js3
-rw-r--r--jstests/noPassthrough/change_stream_concurrent_implicit_db_create.js2
-rw-r--r--jstests/noPassthrough/change_stream_error_label.js3
-rw-r--r--jstests/noPassthrough/change_stream_failover.js1
-rw-r--r--jstests/noPassthrough/change_stream_unwind_batched_writes.js3
-rw-r--r--jstests/noPassthrough/change_streams_collation_chunk_migration.js1
-rw-r--r--jstests/noPassthrough/change_streams_oplog_rollover.js2
-rw-r--r--jstests/noPassthrough/change_streams_require_majority_read_concern.js8
-rw-r--r--jstests/noPassthrough/change_streams_required_privileges.js2
-rw-r--r--jstests/noPassthrough/change_streams_resume_at_same_clustertime.js1
-rw-r--r--jstests/noPassthrough/change_streams_shell_helper_resume_token.js2
-rw-r--r--jstests/noPassthrough/change_streams_update_lookup_collation.js8
-rw-r--r--jstests/noPassthrough/collmod_convert_to_unique_disallow_duplicates.js4
-rw-r--r--jstests/noPassthrough/collmod_convert_to_unique_locking.js4
-rw-r--r--jstests/noPassthrough/collmod_disallow_duplicates_restart.js4
-rw-r--r--jstests/noPassthrough/collmod_disallow_duplicates_step_up.js4
-rw-r--r--jstests/noPassthrough/collmod_index_noop.js2
-rw-r--r--jstests/noPassthrough/columnstore_index_persistence.js2
-rw-r--r--jstests/noPassthrough/command_line_parsing.js2
-rw-r--r--jstests/noPassthrough/comment_field_passthrough.js1
-rw-r--r--jstests/noPassthrough/commit_quorum_voting_nodes.js1
-rw-r--r--jstests/noPassthrough/dbcheck_batch_deadline.js1
-rw-r--r--jstests/noPassthrough/deprecated_map_reduce.js10
-rw-r--r--jstests/noPassthrough/durable_hidden_index.js2
-rw-r--r--jstests/noPassthrough/durable_view_catalog.js2
-rw-r--r--jstests/noPassthrough/exit_logging.js4
-rw-r--r--jstests/noPassthrough/fail_point_getmore_after_cursor_checkout.js2
-rw-r--r--jstests/noPassthrough/index_build_prepareUnique.js4
-rw-r--r--jstests/noPassthrough/index_build_restart_secondary.js1
-rw-r--r--jstests/noPassthrough/indexbg1.js2
-rw-r--r--jstests/noPassthrough/indexbg2.js2
-rw-r--r--jstests/noPassthrough/killOp_against_journal_flusher_thread.js4
-rw-r--r--jstests/noPassthrough/large_txn_correctness.js2
-rw-r--r--jstests/noPassthrough/logical_session_cursor_checks.js2
-rw-r--r--jstests/noPassthrough/max_time_ms_sharded.js2
-rw-r--r--jstests/noPassthrough/non_durable_writes_on_primary_can_reach_majority.js4
-rw-r--r--jstests/noPassthrough/oplog_retention_hours.js2
-rw-r--r--jstests/noPassthrough/oplog_rollover_agg.js2
-rw-r--r--jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js3
-rw-r--r--jstests/noPassthrough/out_majority_read_replset.js8
-rw-r--r--jstests/noPassthrough/out_merge_majority_read.js8
-rw-r--r--jstests/noPassthrough/query_yields_catch_index_corruption.js1
-rw-r--r--jstests/noPassthrough/readConcern_atClusterTime.js7
-rw-r--r--jstests/noPassthrough/readConcern_atClusterTime_noop_write.js8
-rw-r--r--jstests/noPassthrough/read_concern_snapshot_yielding.js8
-rw-r--r--jstests/noPassthrough/read_majority.js8
-rw-r--r--jstests/noPassthrough/read_majority_reads.js8
-rw-r--r--jstests/noPassthrough/read_write_concern_defaults_startup.js2
-rw-r--r--jstests/noPassthrough/reconfig_for_psa_set_shell.js2
-rw-r--r--jstests/noPassthrough/refresh_sessions_command.js2
-rw-r--r--jstests/noPassthrough/reindex_crash_rebuilds_id_index.js1
-rw-r--r--jstests/noPassthrough/require_api_version.js1
-rw-r--r--jstests/noPassthrough/serverStatus_does_not_block_on_RSTL.js2
-rw-r--r--jstests/noPassthrough/server_write_concern_metrics.js1
-rw-r--r--jstests/noPassthrough/standalone_replication_recovery.js3
-rw-r--r--jstests/noPassthrough/start_session_command.js2
-rw-r--r--jstests/noPassthrough/startup_recovery_truncates_oplog_holes_after_primary_crash.js3
-rw-r--r--jstests/noPassthrough/sync_write.js2
-rw-r--r--jstests/noPassthrough/temporarily_unavailable_error.js1
-rw-r--r--jstests/noPassthrough/timeseries_idle_buckets.js2
-rw-r--r--jstests/noPassthrough/unsupported_change_stream_deployments.js8
-rw-r--r--jstests/noPassthrough/verify_session_cache_updates.js2
-rw-r--r--jstests/noPassthrough/wt_change_log_compressor.js42
-rw-r--r--jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js8
-rw-r--r--jstests/noPassthrough/wt_nojournal_skip_recovery.js99
-rw-r--r--jstests/noPassthrough/wt_nojournal_toggle.js123
-rw-r--r--jstests/noPassthrough/wt_unclean_shutdown.js2
-rw-r--r--jstests/noPassthroughWithMongod/bench_test_crud_commands.js5
-rw-r--r--jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js2
-rw-r--r--jstests/replsets/batch_write_command_wc.js11
-rw-r--r--jstests/replsets/bulk_api_wc.js11
-rw-r--r--jstests/replsets/find_and_modify_wc.js11
-rw-r--r--jstests/replsets/rollback_dup_ids.js4
-rw-r--r--jstests/replsets/standalone_replication_recovery_idempotent.js2
-rw-r--r--jstests/replsets/standalone_replication_recovery_prepare_only.js2
-rw-r--r--jstests/replsets/standalone_replication_recovery_prepare_with_commit.js2
-rw-r--r--jstests/replsets/standalone_replication_recovery_relaxes_index_constraints.js2
-rw-r--r--jstests/replsets/startup_recovery_for_restore.js2
-rw-r--r--jstests/replsets/startup_recovery_for_restore_needs_rollback.js2
-rw-r--r--jstests/replsets/startup_recovery_for_restore_restarts.js2
-rw-r--r--jstests/replsets/tenant_migration_shard_merge_import_write_conflict_retry.js1
-rw-r--r--jstests/selinux/core.js2
-rw-r--r--jstests/serverless/rd.js2
-rw-r--r--jstests/serverless/serverlesstest.js2
-rw-r--r--jstests/sharding/config_rs_change.js2
-rw-r--r--jstests/sharding/empty_cluster_init.js2
-rw-r--r--jstests/sharding/mongos_wait_csrs_initiate.js4
-rw-r--r--jstests/sharding/server37750.js4
-rw-r--r--jstests/sharding/shard_aware_init.js2
-rw-r--r--jstests/sharding/shard_identity_rollback.js2
-rw-r--r--jstests/sharding/sharding_options.js8
-rw-r--r--jstests/slow1/conc_update.js2
-rw-r--r--jstests/slow1/initial_sync_many_dbs.js7
-rw-r--r--jstests/slow1/replsets_priority1.js8
-rw-r--r--jstests/ssl/libs/ssl_helpers.js2
-rw-r--r--jstests/ssl/ssl_without_ca.js1
-rw-r--r--jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js3
-rw-r--r--jstests/ssl_x509/initial_sync1_x509.js2
-rw-r--r--jstests/ssl_x509/shell_x509_system_user.js2
-rw-r--r--jstests/ssl_x509/upgrade_to_x509_ssl.js2
-rw-r--r--jstests/watchdog/wd_journal_hang.js2
-rw-r--r--src/mongo/db/mongod_main.cpp11
-rw-r--r--src/mongo/db/mongod_options.cpp37
-rw-r--r--src/mongo/db/mongod_options.h3
-rw-r--r--src/mongo/db/mongod_options_sharding.idl2
-rw-r--r--src/mongo/db/mongod_options_storage.idl12
-rw-r--r--src/mongo/db/repl/oplog_applier_impl.cpp6
-rw-r--r--src/mongo/db/repl/replication_consistency_markers_impl.cpp2
-rw-r--r--src/mongo/db/repl/replication_coordinator_impl.cpp29
-rw-r--r--src/mongo/db/startup_warnings_mongod.cpp26
-rw-r--r--src/mongo/db/storage/control/storage_control.cpp6
-rw-r--r--src/mongo/db/storage/devnull/devnull_kv_engine.h7
-rw-r--r--src/mongo/db/storage/kv/kv_drop_pending_ident_reaper_test.cpp4
-rw-r--r--src/mongo/db/storage/kv/kv_engine.h2
-rw-r--r--src/mongo/db/storage/recovery_unit.h2
-rw-r--r--src/mongo/db/storage/storage_engine.h5
-rw-r--r--src/mongo/db/storage/storage_engine_impl.cpp4
-rw-r--r--src/mongo/db/storage/storage_engine_impl.h2
-rw-r--r--src/mongo/db/storage/storage_engine_mock.h4
-rw-r--r--src/mongo/db/storage/storage_options.cpp3
-rw-r--r--src/mongo/db/storage/storage_options.h2
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp1
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp78
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h7
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp1
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp1
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp3
-rw-r--r--src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp6
-rw-r--r--src/mongo/db/write_concern.cpp6
-rw-r--r--src/mongo/dbtests/framework_options.idl6
-rw-r--r--src/mongo/embedded/embedded_options.h4
-rw-r--r--src/mongo/s/mongos_options.h4
-rw-r--r--src/mongo/shell/replsettest.js18
-rw-r--r--src/mongo/shell/servers.js7
-rw-r--r--src/mongo/shell/shardingtest.js2
-rw-r--r--src/mongo/shell/utils.js1
-rw-r--r--src/mongo/watchdog/watchdog_mongod.cpp29
170 files changed, 227 insertions, 914 deletions
diff --git a/buildscripts/packaging/msi/mongod.yaml b/buildscripts/packaging/msi/mongod.yaml
index dd4bdb39011..6cfd2b41427 100644
--- a/buildscripts/packaging/msi/mongod.yaml
+++ b/buildscripts/packaging/msi/mongod.yaml
@@ -6,10 +6,6 @@
# Where and how to store data.
storage:
dbPath: %MONGO_DATA_PATH%
- journal:
- enabled: true
-# engine:
-# wiredTiger:
# where to write logging data.
systemLog:
diff --git a/buildscripts/resmokeconfig/suites/dbtest.yml b/buildscripts/resmokeconfig/suites/dbtest.yml
index da62ab67992..71c0274dd84 100644
--- a/buildscripts/resmokeconfig/suites/dbtest.yml
+++ b/buildscripts/resmokeconfig/suites/dbtest.yml
@@ -5,4 +5,3 @@ selector: {}
executor:
config:
dbtest_options:
- dur: ''
diff --git a/buildscripts/resmokelib/core/programs.py b/buildscripts/resmokelib/core/programs.py
index c4c97238327..8c94cadcad9 100644
--- a/buildscripts/resmokelib/core/programs.py
+++ b/buildscripts/resmokelib/core/programs.py
@@ -135,7 +135,6 @@ def mongo_shell_program( # pylint: disable=too-many-arguments,too-many-branches
"enableMajorityReadConcern": (config.MAJORITY_READ_CONCERN, True),
"mixedBinVersions": (config.MIXED_BIN_VERSIONS, ""),
"multiversionBinVersion": (shell_mixed_version, ""),
- "noJournal": (config.NO_JOURNAL, False),
"storageEngine": (config.STORAGE_ENGINE, ""),
"storageEngineCacheSizeGB": (config.STORAGE_ENGINE_CACHE_SIZE, ""),
"testName": (test_name, ""),
diff --git a/buildscripts/resmokelib/powercycle/powercycle.py b/buildscripts/resmokelib/powercycle/powercycle.py
index 21f9b74b37c..84718599dd1 100755
--- a/buildscripts/resmokelib/powercycle/powercycle.py
+++ b/buildscripts/resmokelib/powercycle/powercycle.py
@@ -1329,7 +1329,6 @@ def main(parser_actions, options): # pylint: disable=too-many-branches,too-many
# Initialize the mongod options
# Note - We use posixpath for Windows client to Linux server scenarios.
root_dir = f"{powercycle_constants.REMOTE_DIR}/mongodb-powercycle-test-{int(time.time())}"
- mongod_options_map = parse_options(task_config.mongod_options)
set_fcv_cmd = "set_fcv" if task_config.fcv is not None else ""
# Error out earlier if these options are not properly specified
@@ -1411,11 +1410,6 @@ def main(parser_actions, options): # pylint: disable=too-many-branches,too-many
mongo_repo_root_dir = os.getcwd()
# Setup the validate_canary option.
- if "nojournal" in mongod_options_map:
- LOGGER.error("Cannot create and validate canary documents if the mongod option"
- " '--nojournal' is used.")
- local_exit(1)
-
canary_doc = ""
# Set the Pymongo connection timeout to 1 hour for canary insert & validation.
diff --git a/buildscripts/resmokelib/run/__init__.py b/buildscripts/resmokelib/run/__init__.py
index 20ff72c2c79..b16517f1d79 100644
--- a/buildscripts/resmokelib/run/__init__.py
+++ b/buildscripts/resmokelib/run/__init__.py
@@ -633,9 +633,6 @@ class RunPlugin(PluginInterface):
" started by resmoke.py. The argument is specified as bracketed YAML -"
" i.e. JSON with support for single quoted and unquoted keys."))
- parser.add_argument("--nojournal", action="store_true", dest="no_journal",
- help="Disables journaling for all mongod's.")
-
parser.add_argument("--numClientsPerFixture", type=int, dest="num_clients_per_fixture",
help="Number of clients running tests per fixture.")
diff --git a/buildscripts/resmokelib/testing/fixtures/replicaset.py b/buildscripts/resmokelib/testing/fixtures/replicaset.py
index 7f502f6be76..a43b72a6d84 100644
--- a/buildscripts/resmokelib/testing/fixtures/replicaset.py
+++ b/buildscripts/resmokelib/testing/fixtures/replicaset.py
@@ -160,9 +160,7 @@ class ReplicaSetFixture(interface.ReplFixture): # pylint: disable=too-many-inst
"writeConcernMajorityJournalDefault"] = self.write_concern_majority_journal_default
else:
server_status = client.admin.command({"serverStatus": 1})
- cmd_line_opts = client.admin.command({"getCmdLineOpts": 1})
- if not (server_status["storageEngine"]["persistent"] and cmd_line_opts["parsed"].get(
- "storage", {}).get("journal", {}).get("enabled", True)):
+ if not server_status["storageEngine"]["persistent"]:
repl_config["writeConcernMajorityJournalDefault"] = False
if self.replset_config_options.get("configsvr", False):
diff --git a/buildscripts/resmokelib/testing/fixtures/standalone.py b/buildscripts/resmokelib/testing/fixtures/standalone.py
index 898a622d326..03dc436e15d 100644
--- a/buildscripts/resmokelib/testing/fixtures/standalone.py
+++ b/buildscripts/resmokelib/testing/fixtures/standalone.py
@@ -311,7 +311,6 @@ class MongodLauncher(object):
shortcut_opts = {
"enableMajorityReadConcern": self.config.MAJORITY_READ_CONCERN,
- "nojournal": self.config.NO_JOURNAL,
"storageEngine": self.config.STORAGE_ENGINE,
"transportLayer": self.config.TRANSPORT_LAYER,
"wiredTigerCollectionConfigString": self.config.WT_COLL_CONFIG,
@@ -327,16 +326,10 @@ class MongodLauncher(object):
shortcut_opts["wiredTigerCacheSizeGB"] = self.config.STORAGE_ENGINE_CACHE_SIZE
# These options are just flags, so they should not take a value.
- opts_without_vals = ("nojournal", "logappend")
-
- # Have the --nojournal command line argument to resmoke.py unset the journal option.
- if shortcut_opts["nojournal"] and "journal" in mongod_options:
- del mongod_options["journal"]
+ opts_without_vals = ("logappend")
# Ensure that config servers run with journaling enabled.
if "configsvr" in mongod_options:
- shortcut_opts["nojournal"] = False
- mongod_options["journal"] = ""
suite_set_parameters.setdefault("reshardingMinimumOperationDurationMillis", 5000)
suite_set_parameters.setdefault("reshardingCriticalSectionTimeoutMillis",
24 * 60 * 60) # 24 hours
diff --git a/buildscripts/tests/resmokelib/test_parser.py b/buildscripts/tests/resmokelib/test_parser.py
index b9fc74f088b..86c134b1292 100644
--- a/buildscripts/tests/resmokelib/test_parser.py
+++ b/buildscripts/tests/resmokelib/test_parser.py
@@ -81,21 +81,6 @@ class TestLocalCommandLine(unittest.TestCase):
"--includeWithAnyTags=tag3,tag5",
])
- def test_keeps_no_journal_option(self):
- cmdline = to_local_args([
- "run",
- "--suites=my_suite",
- "--nojournal",
- "--storageEngine=my_storage_engine",
- ])
-
- self.assertEqual(cmdline, [
- "run",
- "--suites=my_suite",
- "--storageEngine=my_storage_engine",
- "--nojournal",
- ])
-
def test_keeps_num_clients_per_fixture_option(self):
cmdline = to_local_args([
"run",
diff --git a/buildscripts/tests/test_burn_in_tags_evergreen.yml b/buildscripts/tests/test_burn_in_tags_evergreen.yml
index d6a651fcde8..2db458fff05 100644
--- a/buildscripts/tests/test_burn_in_tags_evergreen.yml
+++ b/buildscripts/tests/test_burn_in_tags_evergreen.yml
@@ -125,7 +125,7 @@ buildvariants:
multiversion_edition: enterprise
multiversion_platform: rhel80
scons_cache_scope: shared
- test_flags: --storageEngine=inMemory --excludeWithAnyTags=requires_persistence,requires_journaling
+ test_flags: --storageEngine=inMemory --excludeWithAnyTags=requires_persistence
modules:
- enterprise
run_on:
diff --git a/etc/evergreen.yml b/etc/evergreen.yml
index 6de39718d11..08f6d9489b8 100644
--- a/etc/evergreen.yml
+++ b/etc/evergreen.yml
@@ -288,46 +288,6 @@ buildvariants:
num_scons_link_jobs_available: 0.99
test_flags: --excludeWithAnyTags=requires_http_client
-- name: linux-64-duroff
- display_name: Linux (No Journal)
- cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
- run_on:
- - rhel80-small
- expansions: &linux-64-required-duroff-expansions
- compile_flags: -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_v3_gcc.vars --enable-free-mon=off --enable-http-client=off --link-model=dynamic
- multiversion_platform: rhel80
- multiversion_edition: targeted
- # Running WiredTiger with --nojournal in a replica set is no longer supported, so this variant
- # does not include replica set tests. Since transactions are only supported on replica sets, we
- # exclude those tests as well.
- test_flags: --nojournal --excludeWithAnyTags=requires_journaling,requires_replication,requires_sharding,uses_transactions,requires_http_client
- scons_cache_mode: all
- scons_cache_scope: shared
- num_scons_link_jobs_available: 0.99
- large_distro_name: rhel80-medium
- tasks:
- - name: compile_test_and_package_parallel_core_stream_TG
- distros:
- - rhel80-xlarge
- - name: compile_test_and_package_parallel_unittest_stream_TG
- distros:
- - rhel80-xlarge
- - name: compile_test_and_package_parallel_dbtest_stream_TG
- distros:
- - rhel80-xlarge
- - name: .aggfuzzer .common
- - name: aggregation !.feature_flag_guarded
- - name: aggregation_auth !.feature_flag_guarded
- - name: auth_gen
- - name: .misc_js !.sharded
- - name: concurrency_gen
- - name: concurrency_simultaneous_gen
- - name: disk_wiredtiger
- - name: failpoints_auth
- - name: .jscore .common !.sharding !.decimal !.txns
- - name: .jstestfuzz .common !.sharding !.repl
- - name: generate_buildid_to_debug_symbols_mapping
-
- name: tla-plus
display_name: TLA+
run_on:
@@ -1243,7 +1203,7 @@ buildvariants:
idle_timeout_factor: 1.5
exec_timeout_factor: 1.5
large_distro_name: rhel80-medium
- burn_in_tag_buildvariants: enterprise-rhel-80-64-bit-inmem linux-64-duroff enterprise-rhel-80-64-bit-multiversion
+ burn_in_tag_buildvariants: enterprise-rhel-80-64-bit-inmem enterprise-rhel-80-64-bit-multiversion
num_scons_link_jobs_available: 0.99
tasks:
- name: compile_test_and_package_parallel_core_stream_TG
@@ -1547,7 +1507,7 @@ buildvariants:
target_resmoke_time: 10
max_sub_suites: 5
large_distro_name: rhel80-medium
- burn_in_tag_buildvariants: enterprise-rhel-80-64-bit-inmem linux-64-duroff enterprise-rhel-80-64-bit-multiversion
+ burn_in_tag_buildvariants: enterprise-rhel-80-64-bit-inmem enterprise-rhel-80-64-bit-multiversion
num_scons_link_jobs_available: 0.99
test_flags: >-
--mongodSetParameters="{internalQueryForceClassicEngine: true}"
diff --git a/jstests/auth/curop_auth_info.js b/jstests/auth/curop_auth_info.js
index f05bea56c9b..30928933941 100644
--- a/jstests/auth/curop_auth_info.js
+++ b/jstests/auth/curop_auth_info.js
@@ -61,7 +61,7 @@ const m = MongoRunner.runMongod();
runTest(m, m);
MongoRunner.stopMongod(m);
-if (!jsTestOptions().noJournal) {
+if (jsTestOptions().storageEngine != "inMemory") {
const st = new ShardingTest({shards: 1, mongos: 1, config: 1, keyFile: 'jstests/libs/key1'});
runTest(st.s0, st.shard0);
st.stop();
diff --git a/jstests/auth/currentop_cursors_auth.js b/jstests/auth/currentop_cursors_auth.js
index 5b1cf3255c2..b51325de61a 100644
--- a/jstests/auth/currentop_cursors_auth.js
+++ b/jstests/auth/currentop_cursors_auth.js
@@ -1,7 +1,7 @@
/**
* Tests that a user's ability to view open cursors via $currentOp obeys authentication rules on
* both mongoD and mongoS.
- * @tags: [assumes_read_concern_unchanged, requires_auth, requires_journaling, requires_replication]
+ * @tags: [assumes_read_concern_unchanged, requires_auth, requires_replication]
*/
(function() {
"use strict";
diff --git a/jstests/auth/drop-user-transaction.js b/jstests/auth/drop-user-transaction.js
index d88ee8466fd..1a6c1003e48 100644
--- a/jstests/auth/drop-user-transaction.js
+++ b/jstests/auth/drop-user-transaction.js
@@ -1,5 +1,5 @@
// Validate dropUser performed via transaction.
-// @tags: [requires_journaling,requires_replication,exclude_from_large_txns]
+// @tags: [requires_replication,exclude_from_large_txns]
(function() {
'use strict';
diff --git a/jstests/auth/impersonation-deny.js b/jstests/auth/impersonation-deny.js
index 19522c66131..716094efa8e 100644
--- a/jstests/auth/impersonation-deny.js
+++ b/jstests/auth/impersonation-deny.js
@@ -59,13 +59,6 @@ function testMongod(mongod, systemuserpwd = undefined) {
MongoRunner.stopMongod(standalone);
}
-if (!jsTestOptions().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
-}
-
{
const kKeyfile = 'jstests/libs/key1';
const kKey = cat(kKeyfile).replace(/[\011-\015\040]/g, '');
diff --git a/jstests/disk/wt_repair_corrupt_metadata.js b/jstests/disk/wt_repair_corrupt_metadata.js
index ed42fa595d5..6e529bf5d90 100644
--- a/jstests/disk/wt_repair_corrupt_metadata.js
+++ b/jstests/disk/wt_repair_corrupt_metadata.js
@@ -2,7 +2,7 @@
* Tests that --repair on WiredTiger correctly and gracefully handles corrupt metadata files.
* This test should not run on debug builds because WiredTiger's diagnostic mode is enabled.
*
- * @tags: [requires_wiredtiger,requires_journaling]
+ * @tags: [requires_wiredtiger]
*/
(function() {
@@ -69,40 +69,35 @@ let runTest = function(mongodOptions) {
// Corrupt the .turtle file in a very specific way such that the log sequence numbers are
// invalid.
- if (mongodOptions.hasOwnProperty('journal')) {
- if (_isAddressSanitizerActive()) {
- jsTestLog("Skipping log file corruption because the address sanitizer is active.");
- return;
- }
+ if (_isAddressSanitizerActive()) {
+ jsTestLog("Skipping log file corruption because the address sanitizer is active.");
+ return;
+ }
- jsTestLog("Corrupting log file metadata");
+ jsTestLog("Corrupting log file metadata");
- let data = cat(turtleFile, true /* useBinaryMode */);
- let re = /checkpoint_lsn=\(([0-9,]+)\)/g;
- let newData = data.replace(re, "checkpoint_lsn=(1,2)");
+ let data = cat(turtleFile, true /* useBinaryMode */);
+ let re = /checkpoint_lsn=\(([0-9,]+)\)/g;
+ let newData = data.replace(re, "checkpoint_lsn=(1,2)");
- print('writing data to new turtle file: \n' + newData);
- removeFile(turtleFile);
- writeFile(turtleFile, newData, true /* useBinaryMode */);
+ print('writing data to new turtle file: \n' + newData);
+ removeFile(turtleFile);
+ writeFile(turtleFile, newData, true /* useBinaryMode */);
- assertRepairSucceeds(dbpath, mongod.port, mongodOptions);
+ assertRepairSucceeds(dbpath, mongod.port, mongodOptions);
- mongod = startMongodOnExistingPath(dbpath, mongodOptions);
- testColl = mongod.getDB(baseName)[collName];
+ mongod = startMongodOnExistingPath(dbpath, mongodOptions);
+ testColl = mongod.getDB(baseName)[collName];
- // The collection exists despite using a salvaged turtle file because salvage is able to
- // find the table in the WiredTiger.wt file.
- assert(testColl.exists());
+ // The collection exists despite using a salvaged turtle file because salvage is able to
+ // find the table in the WiredTiger.wt file.
+ assert(testColl.exists());
- // We can assert that the data exists because the salvage only took place on the
- // metadata, not the data.
- assert.eq(testColl.find({}).itcount(), 1);
- MongoRunner.stopMongod(mongod);
- }
+ // We can assert that the data exists because the salvage only took place on the
+ // metadata, not the data.
+ assert.eq(testColl.find({}).itcount(), 1);
+ MongoRunner.stopMongod(mongod);
};
-// Repair may behave differently with journaling enabled or disabled, but the end result should
-// be the same.
-runTest({journal: ""});
-runTest({nojournal: ""});
+runTest({});
})();
diff --git a/jstests/disk/wt_table_checks.js b/jstests/disk/wt_table_checks.js
index 47a16e02f7a..9fa135d8ebb 100644
--- a/jstests/disk/wt_table_checks.js
+++ b/jstests/disk/wt_table_checks.js
@@ -2,7 +2,7 @@
* Tests that MongoDB sets the WiredTiger table logging settings correctly under different
* circumstances.
*
- * @tags: [requires_wiredtiger, requires_journaling]
+ * @tags: [requires_wiredtiger]
*/
(function() {
diff --git a/jstests/libs/command_line/test_parsed_options.js b/jstests/libs/command_line/test_parsed_options.js
index e4400952760..7972bb6e22d 100644
--- a/jstests/libs/command_line/test_parsed_options.js
+++ b/jstests/libs/command_line/test_parsed_options.js
@@ -143,8 +143,8 @@ function testGetCmdLineOptsMongos(mongoRunnerConfig, expectedResult) {
// options of its own, and we only want to compare against the options we care about.
function getCmdLineOptsFromMongos(mongosOptions) {
// Start mongod with no options
- var baseMongod = MongoRunner.runMongod(
- {configsvr: "", journal: "", replSet: "csrs", storageEngine: "wiredTiger"});
+ var baseMongod =
+ MongoRunner.runMongod({configsvr: "", replSet: "csrs", storageEngine: "wiredTiger"});
assert.commandWorked(baseMongod.adminCommand({
replSetInitiate:
{_id: "csrs", configsvr: true, members: [{_id: 0, host: baseMongod.host}]}
@@ -219,4 +219,4 @@ function testGetCmdLineOptsMongos(mongoRunnerConfig, expectedResult) {
// testGetCmdLineOptsMongodFailed({ shardsvr : "" });
function testGetCmdLineOptsMongodFailed(mongoRunnerConfig) {
assert.throws(() => MongoRunner.runMongod(mongoRunnerConfig));
-} \ No newline at end of file
+}
diff --git a/jstests/libs/config_files/disable_dur.ini b/jstests/libs/config_files/disable_dur.ini
deleted file mode 100644
index 8f83f3ae5a7..00000000000
--- a/jstests/libs/config_files/disable_dur.ini
+++ /dev/null
@@ -1 +0,0 @@
-dur=false
diff --git a/jstests/libs/config_files/disable_journal.ini b/jstests/libs/config_files/disable_journal.ini
deleted file mode 100644
index d0010a86906..00000000000
--- a/jstests/libs/config_files/disable_journal.ini
+++ /dev/null
@@ -1 +0,0 @@
-journal=false
diff --git a/jstests/libs/config_files/disable_nodur.ini b/jstests/libs/config_files/disable_nodur.ini
deleted file mode 100644
index b0c73a48b30..00000000000
--- a/jstests/libs/config_files/disable_nodur.ini
+++ /dev/null
@@ -1 +0,0 @@
-nodur=false
diff --git a/jstests/libs/config_files/disable_nojournal.ini b/jstests/libs/config_files/disable_nojournal.ini
deleted file mode 100644
index 17172363d25..00000000000
--- a/jstests/libs/config_files/disable_nojournal.ini
+++ /dev/null
@@ -1 +0,0 @@
-nojournal=false
diff --git a/jstests/libs/config_files/enable_journal.json b/jstests/libs/config_files/enable_journal.json
deleted file mode 100644
index d75b94ccbc7..00000000000
--- a/jstests/libs/config_files/enable_journal.json
+++ /dev/null
@@ -1,7 +0,0 @@
-{
- "storage" : {
- "journal" : {
- "enabled" : false
- }
- }
-}
diff --git a/jstests/libs/config_files/implicitly_enable_dur.ini b/jstests/libs/config_files/implicitly_enable_dur.ini
deleted file mode 100644
index 43495fbd0bd..00000000000
--- a/jstests/libs/config_files/implicitly_enable_dur.ini
+++ /dev/null
@@ -1 +0,0 @@
-dur=
diff --git a/jstests/libs/config_files/implicitly_enable_journal.ini b/jstests/libs/config_files/implicitly_enable_journal.ini
deleted file mode 100644
index f750ac2e185..00000000000
--- a/jstests/libs/config_files/implicitly_enable_journal.ini
+++ /dev/null
@@ -1 +0,0 @@
-journal=
diff --git a/jstests/libs/config_files/implicitly_enable_nodur.ini b/jstests/libs/config_files/implicitly_enable_nodur.ini
deleted file mode 100644
index f1046df16a9..00000000000
--- a/jstests/libs/config_files/implicitly_enable_nodur.ini
+++ /dev/null
@@ -1 +0,0 @@
-nodur=
diff --git a/jstests/libs/config_files/implicitly_enable_nojournal.ini b/jstests/libs/config_files/implicitly_enable_nojournal.ini
deleted file mode 100644
index 737e5c28029..00000000000
--- a/jstests/libs/config_files/implicitly_enable_nojournal.ini
+++ /dev/null
@@ -1 +0,0 @@
-nojournal=
diff --git a/jstests/noPassthrough/agg_explain_read_concern.js b/jstests/noPassthrough/agg_explain_read_concern.js
index ce95cfccca6..4bc49984baa 100644
--- a/jstests/noPassthrough/agg_explain_read_concern.js
+++ b/jstests/noPassthrough/agg_explain_read_concern.js
@@ -5,14 +5,6 @@
(function() {
"use strict";
-// Skip this test if running with --nojournal and WiredTiger.
-if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
-}
-
const rst = new ReplSetTest(
{name: "aggExplainReadConcernSet", nodes: 1, nodeOptions: {enableMajorityReadConcern: ""}});
rst.startSet();
diff --git a/jstests/noPassthrough/agg_group.js b/jstests/noPassthrough/agg_group.js
index 6d186946ae9..1f39b843946 100644
--- a/jstests/noPassthrough/agg_group.js
+++ b/jstests/noPassthrough/agg_group.js
@@ -10,8 +10,6 @@
// example, $group is pushed down to SBE at the shard-side and some accumulators may return the
// partial aggregation results in a special format to the mongos.
//
-// Needs the following tag to be excluded from linux-64-duroff build variant because running
-// wiredTiger without journaling in a replica set is not supported.
// @tags: [requires_sharding]
(function() {
'use strict';
diff --git a/jstests/noPassthrough/api_version_2_commands.js b/jstests/noPassthrough/api_version_2_commands.js
index a25c5ee73e5..6200a318638 100644
--- a/jstests/noPassthrough/api_version_2_commands.js
+++ b/jstests/noPassthrough/api_version_2_commands.js
@@ -2,7 +2,7 @@
* Checks that API version 2 will behave correctly with mongod/mongos.
*
* @tags: [
- * requires_journaling,
+ * requires_persistence,
* ]
*/
diff --git a/jstests/noPassthrough/api_version_parameters_shell.js b/jstests/noPassthrough/api_version_parameters_shell.js
index 4b4cb5e8984..617e80c7d03 100644
--- a/jstests/noPassthrough/api_version_parameters_shell.js
+++ b/jstests/noPassthrough/api_version_parameters_shell.js
@@ -3,7 +3,7 @@
* test passing API parameters to the Mongo() constructor.
*
* @tags: [
- * requires_journaling,
+ * requires_persistence,
* ]
*/
diff --git a/jstests/noPassthrough/backup_restore_rolling.js b/jstests/noPassthrough/backup_restore_rolling.js
index aa7b9ed3315..467fe54f6f2 100644
--- a/jstests/noPassthrough/backup_restore_rolling.js
+++ b/jstests/noPassthrough/backup_restore_rolling.js
@@ -30,13 +30,6 @@ if (_isWindows()) {
// Grab the storage engine, default is wiredTiger
var storageEngine = jsTest.options().storageEngine || "wiredTiger";
-// Skip this test if running with --nojournal.
-if (jsTest.options().noJournal) {
- print(
- "Skipping test because running without journaling isn't a valid replica set configuration");
- return;
-}
-
// if rsync is not available on the host, then this test is skipped
if (!runProgram('bash', '-c', 'which rsync')) {
new BackupRestoreTest({backup: 'rolling', clientTime: 30000}).run();
diff --git a/jstests/noPassthrough/batched_multi_deletes.js b/jstests/noPassthrough/batched_multi_deletes.js
index fc9e0ae8fdf..5a288f1693c 100644
--- a/jstests/noPassthrough/batched_multi_deletes.js
+++ b/jstests/noPassthrough/batched_multi_deletes.js
@@ -3,8 +3,7 @@
*
* @tags: [
* requires_fcv_61,
- * # Running as a replica set requires journaling.
- * requires_journaling,
+ * requires_replication,
* ]
*/
diff --git a/jstests/noPassthrough/batched_multi_deletes_oplog.js b/jstests/noPassthrough/batched_multi_deletes_oplog.js
index c9848adef4c..2bfc01bae93 100644
--- a/jstests/noPassthrough/batched_multi_deletes_oplog.js
+++ b/jstests/noPassthrough/batched_multi_deletes_oplog.js
@@ -3,8 +3,7 @@
*
* @tags: [
* requires_fcv_61,
- * # Running as a replica set requires journaling.
- * requires_journaling,
+ * requires_replication,
* ]
*/
diff --git a/jstests/noPassthrough/batched_multi_deletes_params.js b/jstests/noPassthrough/batched_multi_deletes_params.js
index a5af6e58397..4d13fb4ec31 100644
--- a/jstests/noPassthrough/batched_multi_deletes_params.js
+++ b/jstests/noPassthrough/batched_multi_deletes_params.js
@@ -3,8 +3,7 @@
*
* @tags: [
* requires_fcv_61,
- * # Running as a replica set requires journaling.
- * requires_journaling,
+ * requires_replication,
* ]
*/
diff --git a/jstests/noPassthrough/change_stream_concurrent_implicit_db_create.js b/jstests/noPassthrough/change_stream_concurrent_implicit_db_create.js
index 5a1e39291a1..6d3faea5cca 100644
--- a/jstests/noPassthrough/change_stream_concurrent_implicit_db_create.js
+++ b/jstests/noPassthrough/change_stream_concurrent_implicit_db_create.js
@@ -2,7 +2,7 @@
// an order that avoids a deadlock.
// This test was designed to reproduce SERVER-34333.
// This test uses the WiredTiger storage engine, which does not support running without journaling.
-// @tags: [requires_replication, requires_journaling, requires_majority_read_concern]
+// @tags: [requires_replication, requires_majority_read_concern]
(function() {
"use strict";
diff --git a/jstests/noPassthrough/change_stream_error_label.js b/jstests/noPassthrough/change_stream_error_label.js
index a8feadd1714..cea4c9e481d 100644
--- a/jstests/noPassthrough/change_stream_error_label.js
+++ b/jstests/noPassthrough/change_stream_error_label.js
@@ -2,7 +2,6 @@
* Test that a change stream pipeline which encounters a retryable exception responds to the client
* with an error object that includes the "ResumableChangeStreamError" label.
* @tags: [
- * requires_journaling,
* requires_replication,
* uses_change_streams,
* ]
@@ -67,4 +66,4 @@ testFailGetMoreAfterCursorCheckoutFailpoint(
{errorCode: ErrorCodes.FailedToParse, expectedLabel: false});
rst.stopSet();
-}()); \ No newline at end of file
+}());
diff --git a/jstests/noPassthrough/change_stream_failover.js b/jstests/noPassthrough/change_stream_failover.js
index b95996a74b5..92c3eea9518 100644
--- a/jstests/noPassthrough/change_stream_failover.js
+++ b/jstests/noPassthrough/change_stream_failover.js
@@ -2,7 +2,6 @@
// by triggering a stepdown.
// This test uses the WiredTiger storage engine, which does not support running without journaling.
// @tags: [
-// requires_journaling,
// requires_majority_read_concern,
// requires_replication,
// ]
diff --git a/jstests/noPassthrough/change_stream_unwind_batched_writes.js b/jstests/noPassthrough/change_stream_unwind_batched_writes.js
index 6594041e3ce..bfd9f5b577c 100644
--- a/jstests/noPassthrough/change_stream_unwind_batched_writes.js
+++ b/jstests/noPassthrough/change_stream_unwind_batched_writes.js
@@ -3,8 +3,7 @@
*
* @tags: [
* requires_fcv_61,
- * # Running as a replica set requires journaling.
- * requires_journaling,
+ * requires_replication,
* requires_majority_read_concern,
* uses_change_streams,
* ]
diff --git a/jstests/noPassthrough/change_streams_collation_chunk_migration.js b/jstests/noPassthrough/change_streams_collation_chunk_migration.js
index 3386ba91480..ea7fd26b72e 100644
--- a/jstests/noPassthrough/change_streams_collation_chunk_migration.js
+++ b/jstests/noPassthrough/change_streams_collation_chunk_migration.js
@@ -2,7 +2,6 @@
* Tests that a change stream on a sharded collection with a non-simple default collation is not
* erroneously invalidated upon chunk migration. Reproduction script for the bug in SERVER-33944.
* @tags: [
- * requires_journaling,
* requires_replication,
* ]
*/
diff --git a/jstests/noPassthrough/change_streams_oplog_rollover.js b/jstests/noPassthrough/change_streams_oplog_rollover.js
index b684fa6791d..07981834923 100644
--- a/jstests/noPassthrough/change_streams_oplog_rollover.js
+++ b/jstests/noPassthrough/change_streams_oplog_rollover.js
@@ -1,6 +1,6 @@
// Tests the behaviour of change streams on an oplog which rolls over.
// @tags: [
-// requires_journaling,
+// requires_replication,
// requires_majority_read_concern,
// uses_change_streams,
// ]
diff --git a/jstests/noPassthrough/change_streams_require_majority_read_concern.js b/jstests/noPassthrough/change_streams_require_majority_read_concern.js
index 3a0aa37734d..54af9e9806f 100644
--- a/jstests/noPassthrough/change_streams_require_majority_read_concern.js
+++ b/jstests/noPassthrough/change_streams_require_majority_read_concern.js
@@ -12,14 +12,6 @@ load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries
const rst = new ReplSetTest({nodes: 2, nodeOptions: {enableMajorityReadConcern: ""}});
-// Skip this test if running with --nojournal and WiredTiger.
-if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
-}
-
rst.startSet();
rst.initiate();
diff --git a/jstests/noPassthrough/change_streams_required_privileges.js b/jstests/noPassthrough/change_streams_required_privileges.js
index 504b641e68d..d6a47a3e7a2 100644
--- a/jstests/noPassthrough/change_streams_required_privileges.js
+++ b/jstests/noPassthrough/change_streams_required_privileges.js
@@ -1,8 +1,8 @@
// Tests that a change stream requires the correct privileges to be run.
// This test uses the WiredTiger storage engine, which does not support running without journaling.
// @tags: [
-// requires_journaling,
// requires_majority_read_concern,
+// requires_persistence,
// requires_replication,
// ]
(function() {
diff --git a/jstests/noPassthrough/change_streams_resume_at_same_clustertime.js b/jstests/noPassthrough/change_streams_resume_at_same_clustertime.js
index 0832545786f..b41fd01dfd3 100644
--- a/jstests/noPassthrough/change_streams_resume_at_same_clustertime.js
+++ b/jstests/noPassthrough/change_streams_resume_at_same_clustertime.js
@@ -3,7 +3,6 @@
* clusterTime is identical, differing only by documentKey, without causing the PBRT sent to mongoS
* to go back-in-time.
* @tags: [
- * requires_journaling,
* requires_majority_read_concern,
* requires_replication,
* ]
diff --git a/jstests/noPassthrough/change_streams_shell_helper_resume_token.js b/jstests/noPassthrough/change_streams_shell_helper_resume_token.js
index 830bb0fd1fe..006efb86f1c 100644
--- a/jstests/noPassthrough/change_streams_shell_helper_resume_token.js
+++ b/jstests/noPassthrough/change_streams_shell_helper_resume_token.js
@@ -3,7 +3,7 @@
* token with each document and returning the postBatchResumeToken as soon as each batch is
* exhausted.
* @tags: [
- * requires_journaling,
+ * requires_replication,
* requires_majority_read_concern,
* ]
*/
diff --git a/jstests/noPassthrough/change_streams_update_lookup_collation.js b/jstests/noPassthrough/change_streams_update_lookup_collation.js
index dff1cf7c07e..df322d41413 100644
--- a/jstests/noPassthrough/change_streams_update_lookup_collation.js
+++ b/jstests/noPassthrough/change_streams_update_lookup_collation.js
@@ -8,14 +8,6 @@
(function() {
"use strict";
-// Skip this test if running with --nojournal and WiredTiger.
-if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
-}
-
const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
diff --git a/jstests/noPassthrough/collmod_convert_to_unique_disallow_duplicates.js b/jstests/noPassthrough/collmod_convert_to_unique_disallow_duplicates.js
index 6c5ae7b109b..cf47b356df2 100644
--- a/jstests/noPassthrough/collmod_convert_to_unique_disallow_duplicates.js
+++ b/jstests/noPassthrough/collmod_convert_to_unique_disallow_duplicates.js
@@ -5,8 +5,6 @@
* @tags: [
* # TODO(SERVER-61182): Fix WiredTigerKVEngine::alterIdentMetadata() under inMemory.
* requires_persistence,
- * # Replication requires journaling support so this tag also implies exclusion from
- * # --nojournal test configurations.
* requires_replication,
* ]
*/
@@ -255,4 +253,4 @@ testCollModConvertUniqueWithSideWrites(initialDocsDuplicate, (coll) => {
}, {_id: 1000, a: 100} /* duplicateDoc */, [{ids: [1, 2]}, {ids: [3, 4]}] /* expectedViolations */);
rst.stopSet();
-})(); \ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/collmod_convert_to_unique_locking.js b/jstests/noPassthrough/collmod_convert_to_unique_locking.js
index 852ebef872e..6647cc2ccd8 100644
--- a/jstests/noPassthrough/collmod_convert_to_unique_locking.js
+++ b/jstests/noPassthrough/collmod_convert_to_unique_locking.js
@@ -5,8 +5,6 @@
* @tags: [
* # TODO(SERVER-61182): Fix WiredTigerKVEngine::alterIdentMetadata() under inMemory.
* requires_persistence,
- * # Replication requires journaling support so this tag also implies exclusion from
- * # --nojournal test configurations.
* requires_replication,
* ]
*/
@@ -69,4 +67,4 @@ assert.commandWorked(
testDB.runCommand({collMod: collName, index: {keyPattern: {a: 1}, unique: true}}));
rst.stopSet();
-})(); \ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/collmod_disallow_duplicates_restart.js b/jstests/noPassthrough/collmod_disallow_duplicates_restart.js
index bef31ee773d..ef775681526 100644
--- a/jstests/noPassthrough/collmod_disallow_duplicates_restart.js
+++ b/jstests/noPassthrough/collmod_disallow_duplicates_restart.js
@@ -4,8 +4,6 @@
*
* @tags: [
* requires_persistence,
- * # Replication requires journaling support so this tag also implies exclusion from
- * # --nojournal test configurations.
* requires_replication,
* ]
*/
@@ -58,4 +56,4 @@ const uniqueIndexes = coll_primary.getIndexes().filter(function(doc) {
assert.eq(1, uniqueIndexes.length);
rst.stopSet();
-})(); \ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/collmod_disallow_duplicates_step_up.js b/jstests/noPassthrough/collmod_disallow_duplicates_step_up.js
index a41cbe1d440..c10b1045639 100644
--- a/jstests/noPassthrough/collmod_disallow_duplicates_step_up.js
+++ b/jstests/noPassthrough/collmod_disallow_duplicates_step_up.js
@@ -5,8 +5,6 @@
* @tags: [
* # TODO(SERVER-61182): Fix WiredTigerKVEngine::alterIdentMetadata() under inMemory.
* requires_persistence,
- * # Replication requires journaling support so this tag also implies exclusion from
- * # --nojournal test configurations.
* requires_replication,
* ]
*/
@@ -58,4 +56,4 @@ const uniqueIndexes = coll_secondary.getIndexes().filter(function(doc) {
assert.eq(1, uniqueIndexes.length);
rst.stopSet();
-})(); \ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/collmod_index_noop.js b/jstests/noPassthrough/collmod_index_noop.js
index e10857e8dbe..9bc4bdecc88 100644
--- a/jstests/noPassthrough/collmod_index_noop.js
+++ b/jstests/noPassthrough/collmod_index_noop.js
@@ -7,8 +7,6 @@
* @tags: [
* # TODO(SERVER-61182): Fix WiredTigerKVEngine::alterIdentMetadata() under inMemory.
* requires_persistence,
- * # Replication requires journaling support so this tag also implies exclusion from
- * # --nojournal test configurations.
* requires_replication,
* ]
*/
diff --git a/jstests/noPassthrough/columnstore_index_persistence.js b/jstests/noPassthrough/columnstore_index_persistence.js
index ad9b4d77a2a..b6ad24cb908 100644
--- a/jstests/noPassthrough/columnstore_index_persistence.js
+++ b/jstests/noPassthrough/columnstore_index_persistence.js
@@ -3,8 +3,6 @@
*
* @tags: [
* requires_persistence,
- * # Replication requires journaling support so this tag also implies exclusion from
- * # --nojournal test configurations.
* requires_replication,
* ]
*/
diff --git a/jstests/noPassthrough/command_line_parsing.js b/jstests/noPassthrough/command_line_parsing.js
index 95ecdb6cb85..8bb51780474 100644
--- a/jstests/noPassthrough/command_line_parsing.js
+++ b/jstests/noPassthrough/command_line_parsing.js
@@ -31,7 +31,6 @@ delete m2result.parsed.net.transportLayer;
delete m2result.parsed.setParameter;
delete m2result.parsed.storage.engine;
delete m2result.parsed.storage.inMemory;
-delete m2result.parsed.storage.journal;
delete m2result.parsed.storage.rocksdb;
delete m2result.parsed.storage.wiredTiger;
delete m2result.parsed.replication; // Removes enableMajorityReadConcern setting.
@@ -58,7 +57,6 @@ delete m3result.parsed.net.transportLayer;
delete m3result.parsed.setParameter;
delete m3result.parsed.storage.engine;
delete m3result.parsed.storage.inMemory;
-delete m3result.parsed.storage.journal;
delete m3result.parsed.storage.rocksdb;
delete m3result.parsed.storage.wiredTiger;
delete m3result.parsed.replication; // Removes enableMajorityReadConcern setting.
diff --git a/jstests/noPassthrough/comment_field_passthrough.js b/jstests/noPassthrough/comment_field_passthrough.js
index f4c5e11f008..41a1994ba45 100644
--- a/jstests/noPassthrough/comment_field_passthrough.js
+++ b/jstests/noPassthrough/comment_field_passthrough.js
@@ -2,7 +2,6 @@
* Verify that adding 'comment' field to any command shouldn't cause unexpected failures.
* @tags: [
* requires_capped,
- * requires_journaling,
* requires_persistence,
* requires_replication,
* requires_sharding,
diff --git a/jstests/noPassthrough/commit_quorum_voting_nodes.js b/jstests/noPassthrough/commit_quorum_voting_nodes.js
index 87e7b1ce27a..19e233a8068 100644
--- a/jstests/noPassthrough/commit_quorum_voting_nodes.js
+++ b/jstests/noPassthrough/commit_quorum_voting_nodes.js
@@ -2,7 +2,6 @@
* Tests that index build commitQuorum can include non-voting data-bearing nodes.
*
* @tags: [
- * requires_journaling,
* requires_persistence,
* requires_replication,
* ]
diff --git a/jstests/noPassthrough/dbcheck_batch_deadline.js b/jstests/noPassthrough/dbcheck_batch_deadline.js
index 30432ab4a85..7e588aae095 100644
--- a/jstests/noPassthrough/dbcheck_batch_deadline.js
+++ b/jstests/noPassthrough/dbcheck_batch_deadline.js
@@ -3,7 +3,6 @@
* the following batch resumes from where the previous one left off.
*
* @tags: [
- * requires_journaling,
* requires_replication,
* ]
*/
diff --git a/jstests/noPassthrough/deprecated_map_reduce.js b/jstests/noPassthrough/deprecated_map_reduce.js
index ba1ae3daae2..008e132cf90 100644
--- a/jstests/noPassthrough/deprecated_map_reduce.js
+++ b/jstests/noPassthrough/deprecated_map_reduce.js
@@ -59,16 +59,6 @@ matchingLogLines = [...findMatchingLogLines(globalLogs.log, fieldMatcher)];
assert.eq(matchingLogLines.length, 1, matchingLogLines);
MongoRunner.stopMongod(standalone);
-// The tests below this comment connect through mongos and shard a collection (creating replica
-// sets). This if stanza assures that we skip the portion below if testing a build variant with
-// --nojournal and WiredTiger, as that variant will always fail when using replica sets.
-if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
-}
-
jsTest.log('Test cluster');
const st = new ShardingTest({shards: 2, mongos: 1});
diff --git a/jstests/noPassthrough/durable_hidden_index.js b/jstests/noPassthrough/durable_hidden_index.js
index 1552b2b94c5..7ed844370cf 100644
--- a/jstests/noPassthrough/durable_hidden_index.js
+++ b/jstests/noPassthrough/durable_hidden_index.js
@@ -3,7 +3,7 @@
* into the index catalog, that is hidden index remains hidden after restart.
*
* @tags: [
- * requires_journaling,
+ * requires_persistence,
* requires_replication,
* ]
*/
diff --git a/jstests/noPassthrough/durable_view_catalog.js b/jstests/noPassthrough/durable_view_catalog.js
index bee42d25335..39db5b0b855 100644
--- a/jstests/noPassthrough/durable_view_catalog.js
+++ b/jstests/noPassthrough/durable_view_catalog.js
@@ -12,7 +12,7 @@
let dbpath = MongoRunner.dataPath + '_durable_view_catalog';
resetDbpath(dbpath);
-let mongodArgs = {dbpath: dbpath, noCleanData: true, journal: ''};
+let mongodArgs = {dbpath: dbpath, noCleanData: true};
// Start a mongod.
let conn = MongoRunner.runMongod(mongodArgs);
diff --git a/jstests/noPassthrough/exit_logging.js b/jstests/noPassthrough/exit_logging.js
index 172d56cde3d..ac4a2390244 100644
--- a/jstests/noPassthrough/exit_logging.js
+++ b/jstests/noPassthrough/exit_logging.js
@@ -82,9 +82,7 @@ if (_isAddressSanitizerActive()) {
runAllTests({
start: function(opts) {
- var actualOpts = {nojournal: ""};
- Object.extend(actualOpts, opts);
- return MongoRunner.runMongod(actualOpts);
+ return MongoRunner.runMongod(opts);
},
stop: MongoRunner.stopMongod
diff --git a/jstests/noPassthrough/fail_point_getmore_after_cursor_checkout.js b/jstests/noPassthrough/fail_point_getmore_after_cursor_checkout.js
index 5eb00416f86..f35a75e7abb 100644
--- a/jstests/noPassthrough/fail_point_getmore_after_cursor_checkout.js
+++ b/jstests/noPassthrough/fail_point_getmore_after_cursor_checkout.js
@@ -1,6 +1,6 @@
/**
* Test that 'failGetMoreAfterCursorCheckout' works.
- * @tags: [requires_replication, requires_journaling]
+ * @tags: [requires_replication]
*/
(function() {
"use strict";
diff --git a/jstests/noPassthrough/index_build_prepareUnique.js b/jstests/noPassthrough/index_build_prepareUnique.js
index b378d79cdab..44f58126ea9 100644
--- a/jstests/noPassthrough/index_build_prepareUnique.js
+++ b/jstests/noPassthrough/index_build_prepareUnique.js
@@ -3,9 +3,7 @@
* option.
*
* @tags: [
- * # Replication requires journaling support so this tag also implies exclusion from
- * # --nojournal test configurations.
- * requires_journaling,
+ * requires_replication,
* ]
*/
diff --git a/jstests/noPassthrough/index_build_restart_secondary.js b/jstests/noPassthrough/index_build_restart_secondary.js
index 490557d9743..ff2f1647f4b 100644
--- a/jstests/noPassthrough/index_build_restart_secondary.js
+++ b/jstests/noPassthrough/index_build_restart_secondary.js
@@ -3,7 +3,6 @@
* node starts back up.
*
* @tags: [
- * requires_journaling,
* requires_persistence,
* requires_replication,
* ]
diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js
index ed8ebc41828..8c2a840ee75 100644
--- a/jstests/noPassthrough/indexbg1.js
+++ b/jstests/noPassthrough/indexbg1.js
@@ -6,7 +6,7 @@
load("jstests/noPassthrough/libs/index_build.js");
-const conn = MongoRunner.runMongod({nojournal: ""});
+const conn = MongoRunner.runMongod();
assert.neq(null, conn, "mongod failed to start.");
var db = conn.getDB("test");
var baseName = "jstests_indexbg1";
diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js
index 947c8156493..e374a406bfc 100644
--- a/jstests/noPassthrough/indexbg2.js
+++ b/jstests/noPassthrough/indexbg2.js
@@ -4,7 +4,7 @@
(function() {
"use strict";
-const conn = MongoRunner.runMongod({nojournal: ""});
+const conn = MongoRunner.runMongod();
assert.neq(null, conn, "mongod failed to start.");
let db = conn.getDB("test");
diff --git a/jstests/noPassthrough/killOp_against_journal_flusher_thread.js b/jstests/noPassthrough/killOp_against_journal_flusher_thread.js
index 94cddbc2d28..e82d318f2df 100644
--- a/jstests/noPassthrough/killOp_against_journal_flusher_thread.js
+++ b/jstests/noPassthrough/killOp_against_journal_flusher_thread.js
@@ -1,7 +1,7 @@
/**
* Tests that killOp is ineffectual against the journal flusher thread.
*
- * @tags: [requires_journaling]
+ * @tags: [requires_replication]
*/
(function() {
@@ -10,7 +10,7 @@
load("jstests/libs/fail_point_util.js");
const rst = new ReplSetTest({nodes: 1});
-rst.startSet({journal: "", journalCommitInterval: 500});
+rst.startSet({journalCommitInterval: 500});
rst.initiate();
const primary = rst.getPrimary();
diff --git a/jstests/noPassthrough/large_txn_correctness.js b/jstests/noPassthrough/large_txn_correctness.js
index 5f240dd3cca..130f169084a 100644
--- a/jstests/noPassthrough/large_txn_correctness.js
+++ b/jstests/noPassthrough/large_txn_correctness.js
@@ -2,7 +2,7 @@
* This test serves to ensure that the oplog batcher behavior correctly processes large transactions
* so that it does not cause any correctness problems.
*
- * @tags: [requires_journaling]
+ * @tags: [requires_replication]
*/
(function() {
"use strict";
diff --git a/jstests/noPassthrough/logical_session_cursor_checks.js b/jstests/noPassthrough/logical_session_cursor_checks.js
index cbdb2a38011..8dd1c6764df 100644
--- a/jstests/noPassthrough/logical_session_cursor_checks.js
+++ b/jstests/noPassthrough/logical_session_cursor_checks.js
@@ -66,7 +66,7 @@ function runFixture(Fixture) {
}
function Standalone() {
- this.standalone = MongoRunner.runMongod({auth: "", nojournal: ""});
+ this.standalone = MongoRunner.runMongod({auth: ""});
}
Standalone.prototype.stop = function() {
diff --git a/jstests/noPassthrough/max_time_ms_sharded.js b/jstests/noPassthrough/max_time_ms_sharded.js
index a0a34762874..6f46f1cb472 100644
--- a/jstests/noPassthrough/max_time_ms_sharded.js
+++ b/jstests/noPassthrough/max_time_ms_sharded.js
@@ -2,7 +2,7 @@
// time to shards, and that mongos correctly times out max time sharded getmore operations (which
// are run in parallel on shards).
// @tags: [
-// requires_journaling,
+// requires_replication,
// ]
(function() {
'use strict';
diff --git a/jstests/noPassthrough/non_durable_writes_on_primary_can_reach_majority.js b/jstests/noPassthrough/non_durable_writes_on_primary_can_reach_majority.js
index 3e87ed488ec..93d2d4bc520 100644
--- a/jstests/noPassthrough/non_durable_writes_on_primary_can_reach_majority.js
+++ b/jstests/noPassthrough/non_durable_writes_on_primary_can_reach_majority.js
@@ -9,8 +9,8 @@
* Then tests that writes cannot be majority confirmed without the primary and only one secondary.
*
* @tags: [
- * # inMemory has journaling off, so {j:true} writes are not allowed.
- * requires_journaling,
+ * requires_persistence,
+ * requires_replication,
* ]
*/
diff --git a/jstests/noPassthrough/oplog_retention_hours.js b/jstests/noPassthrough/oplog_retention_hours.js
index 5e7bdb2457d..fd6061cf532 100644
--- a/jstests/noPassthrough/oplog_retention_hours.js
+++ b/jstests/noPassthrough/oplog_retention_hours.js
@@ -10,7 +10,7 @@
* current size of the oplog is less than --oplogSize only after the minimum retention time has
* passed since inserting the first set of oplog entries
*
- * @tags: [requires_journaling]
+ * @tags: [requires_replication]
*/
(function() {
"use strict";
diff --git a/jstests/noPassthrough/oplog_rollover_agg.js b/jstests/noPassthrough/oplog_rollover_agg.js
index 447a8b72e1e..4c1a362578c 100644
--- a/jstests/noPassthrough/oplog_rollover_agg.js
+++ b/jstests/noPassthrough/oplog_rollover_agg.js
@@ -1,6 +1,6 @@
// Tests the behaviour of an agg with $_requestReshardingResumeToken on an oplog which rolls over.
// @tags: [
-// requires_journaling,
+// requires_replication,
// requires_majority_read_concern,
// ]
(function() {
diff --git a/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js b/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js
index 35fc36e1ad8..77e6709aa9f 100644
--- a/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js
+++ b/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js
@@ -3,7 +3,6 @@
* documents into account. In particular, a shard which possesses *only* orphan documents does not
* induce the infinite-loop behaviour detailed in SERVER-36871.
* @tags: [
- * requires_journaling,
* requires_replication,
* ]
*/
@@ -151,4 +150,4 @@ runSampleAndConfirmResults({
});
st.stop();
-})(); \ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/out_majority_read_replset.js b/jstests/noPassthrough/out_majority_read_replset.js
index a9a3029695d..1a91d796ecd 100644
--- a/jstests/noPassthrough/out_majority_read_replset.js
+++ b/jstests/noPassthrough/out_majority_read_replset.js
@@ -9,14 +9,6 @@ load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries
const rst = new ReplSetTest({nodes: 2, nodeOptions: {enableMajorityReadConcern: ""}});
-// Skip this test if running with --nojournal and WiredTiger.
-if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
-}
-
rst.startSet();
rst.initiate();
diff --git a/jstests/noPassthrough/out_merge_majority_read.js b/jstests/noPassthrough/out_merge_majority_read.js
index 61c4cab92be..3c56851e6b6 100644
--- a/jstests/noPassthrough/out_merge_majority_read.js
+++ b/jstests/noPassthrough/out_merge_majority_read.js
@@ -13,14 +13,6 @@
(function() {
'use strict';
-// Skip this test if running with --nojournal and WiredTiger.
-if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
-}
-
const testServer = MongoRunner.runMongod();
const db = testServer.getDB("test");
if (!db.serverStatus().storageEngine.supportsCommittedReads) {
diff --git a/jstests/noPassthrough/query_yields_catch_index_corruption.js b/jstests/noPassthrough/query_yields_catch_index_corruption.js
index d05119e8b1e..162efd8bb33 100644
--- a/jstests/noPassthrough/query_yields_catch_index_corruption.js
+++ b/jstests/noPassthrough/query_yields_catch_index_corruption.js
@@ -1,5 +1,4 @@
// @tags: [
-// requires_journaling,
// requires_persistence,
// ]
(function() {
diff --git a/jstests/noPassthrough/readConcern_atClusterTime.js b/jstests/noPassthrough/readConcern_atClusterTime.js
index 90d405e5773..37c644b8d35 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime.js
@@ -16,13 +16,6 @@ function _getClusterTime(rst) {
(function() {
"use strict";
-// Skip this test if running with --nojournal.
-if (jsTest.options().noJournal) {
- print(
- "Skipping test because running without journaling isn't a valid replica set configuration");
- return;
-}
-
const dbName = "test";
const collName = "coll";
diff --git a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
index 7063207141c..f5007554dcc 100644
--- a/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
+++ b/jstests/noPassthrough/readConcern_atClusterTime_noop_write.js
@@ -11,14 +11,6 @@
load("jstests/replsets/rslib.js");
load("jstests/libs/fail_point_util.js");
-// Skip this test if running with --nojournal and WiredTiger.
-if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
-}
-
const conn = MongoRunner.runMongod();
assert.neq(null, conn, "mongod was unable to start up");
if (!assert.commandWorked(conn.getDB("test").serverStatus())
diff --git a/jstests/noPassthrough/read_concern_snapshot_yielding.js b/jstests/noPassthrough/read_concern_snapshot_yielding.js
index c2994266f5f..d0ab3b961c8 100644
--- a/jstests/noPassthrough/read_concern_snapshot_yielding.js
+++ b/jstests/noPassthrough/read_concern_snapshot_yielding.js
@@ -9,14 +9,6 @@
load("jstests/libs/curop_helpers.js"); // For waitForCurOpByFailPoint().
-// Skip this test if running with --nojournal and WiredTiger.
-if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
-}
-
const dbName = "test";
const collName = "coll";
diff --git a/jstests/noPassthrough/read_majority.js b/jstests/noPassthrough/read_majority.js
index a0e4d1969af..f89d9d1ddca 100644
--- a/jstests/noPassthrough/read_majority.js
+++ b/jstests/noPassthrough/read_majority.js
@@ -21,14 +21,6 @@ load("jstests/libs/analyze_plan.js");
(function() {
"use strict";
-// Skip this test if running with --nojournal and WiredTiger.
-if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
-}
-
// Tests the functionality for committed reads for the given read concern level.
function testReadConcernLevel(level) {
var replTest = new ReplSetTest({
diff --git a/jstests/noPassthrough/read_majority_reads.js b/jstests/noPassthrough/read_majority_reads.js
index 82d40af334d..24db2e3ca69 100644
--- a/jstests/noPassthrough/read_majority_reads.js
+++ b/jstests/noPassthrough/read_majority_reads.js
@@ -19,14 +19,6 @@
(function() {
'use strict';
-// Skip this test if running with --nojournal and WiredTiger.
-if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
-}
-
var testServer = MongoRunner.runMongod();
var db = testServer.getDB("test");
if (!db.serverStatus().storageEngine.supportsCommittedReads) {
diff --git a/jstests/noPassthrough/read_write_concern_defaults_startup.js b/jstests/noPassthrough/read_write_concern_defaults_startup.js
index 5d7aa9596cf..1bf1df3ec29 100644
--- a/jstests/noPassthrough/read_write_concern_defaults_startup.js
+++ b/jstests/noPassthrough/read_write_concern_defaults_startup.js
@@ -3,8 +3,8 @@
//
// This test restarts a replica set node, which requires persistence and journaling.
// @tags: [
-// requires_journaling,
// requires_persistence,
+// requires_replication,
// requires_sharding,
// ]
(function() {
diff --git a/jstests/noPassthrough/reconfig_for_psa_set_shell.js b/jstests/noPassthrough/reconfig_for_psa_set_shell.js
index 1079d6eb44b..e828a0042aa 100644
--- a/jstests/noPassthrough/reconfig_for_psa_set_shell.js
+++ b/jstests/noPassthrough/reconfig_for_psa_set_shell.js
@@ -2,7 +2,7 @@
* Tests the 'reconfigForPSASet()' shell function and makes sure that reconfig will succeed while
* preserving majority reads.
*
- * @tags: [requires_journaling]
+ * @tags: [requires_replication]
*/
(function() {
diff --git a/jstests/noPassthrough/refresh_sessions_command.js b/jstests/noPassthrough/refresh_sessions_command.js
index a0a65fb4695..a6bdd8087cb 100644
--- a/jstests/noPassthrough/refresh_sessions_command.js
+++ b/jstests/noPassthrough/refresh_sessions_command.js
@@ -32,7 +32,7 @@ assert.commandWorked(result, "could not run refreshSessions logged in with --aut
// Turn on auth for further testing.
MongoRunner.stopMongod(conn);
-conn = MongoRunner.runMongod({auth: "", nojournal: "", setParameter: {maxSessions: 3}});
+conn = MongoRunner.runMongod({auth: "", setParameter: {maxSessions: 3}});
admin = conn.getDB("admin");
admin.createUser({user: 'admin', pwd: 'admin', roles: ['readAnyDatabase', 'userAdminAnyDatabase']});
diff --git a/jstests/noPassthrough/reindex_crash_rebuilds_id_index.js b/jstests/noPassthrough/reindex_crash_rebuilds_id_index.js
index 84993043ec6..4b88ac24960 100644
--- a/jstests/noPassthrough/reindex_crash_rebuilds_id_index.js
+++ b/jstests/noPassthrough/reindex_crash_rebuilds_id_index.js
@@ -3,7 +3,6 @@
* without an _id index. On startup, mongod should automatically build any missing _id indexes.
*
* @tags: [
- * requires_journaling,
* requires_persistence
* ]
*/
diff --git a/jstests/noPassthrough/require_api_version.js b/jstests/noPassthrough/require_api_version.js
index 1e44d17314b..93dd6132526 100644
--- a/jstests/noPassthrough/require_api_version.js
+++ b/jstests/noPassthrough/require_api_version.js
@@ -5,7 +5,6 @@
* requireApiVersion is true.
*
* @tags: [
- * requires_journaling,
* requires_replication,
* requires_transactions,
* ]
diff --git a/jstests/noPassthrough/serverStatus_does_not_block_on_RSTL.js b/jstests/noPassthrough/serverStatus_does_not_block_on_RSTL.js
index 8865544471c..507482f12a6 100644
--- a/jstests/noPassthrough/serverStatus_does_not_block_on_RSTL.js
+++ b/jstests/noPassthrough/serverStatus_does_not_block_on_RSTL.js
@@ -5,8 +5,6 @@
* # Certain serverStatus sections might pivot to taking the RSTL lock if an action is unsupported
* # by a non-WT storage engine.
* requires_wiredtiger,
- * # Replication requires journaling support so this tag also implies exclusion from --nojournal
- * # test configurations.
* requires_sharding,
* requires_replication,
* ]
diff --git a/jstests/noPassthrough/server_write_concern_metrics.js b/jstests/noPassthrough/server_write_concern_metrics.js
index c0b646d7b0d..67f175cfb2a 100644
--- a/jstests/noPassthrough/server_write_concern_metrics.js
+++ b/jstests/noPassthrough/server_write_concern_metrics.js
@@ -1,6 +1,5 @@
// Tests writeConcern metrics in the serverStatus output.
// @tags: [
-// requires_journaling,
// requires_persistence,
// requires_replication,
// ]
diff --git a/jstests/noPassthrough/standalone_replication_recovery.js b/jstests/noPassthrough/standalone_replication_recovery.js
index dd83f2c438a..afe4e755b7a 100644
--- a/jstests/noPassthrough/standalone_replication_recovery.js
+++ b/jstests/noPassthrough/standalone_replication_recovery.js
@@ -3,7 +3,6 @@
*
* This test only makes sense for storage engines that support recover to stable timestamp.
* @tags: [
- * requires_journaling,
* requires_majority_read_concern,
* requires_persistence,
* requires_replication,
@@ -167,4 +166,4 @@ restartServerReplication(secondary);
// Skip checking db hashes since we do a write as a standalone.
TestData.skipCheckDBHashes = true;
rst.stopSet();
-})(); \ No newline at end of file
+})();
diff --git a/jstests/noPassthrough/start_session_command.js b/jstests/noPassthrough/start_session_command.js
index 0ad209c30e7..1068561672c 100644
--- a/jstests/noPassthrough/start_session_command.js
+++ b/jstests/noPassthrough/start_session_command.js
@@ -47,7 +47,7 @@ MongoRunner.stopMongod(conn);
//
-conn = MongoRunner.runMongod({auth: "", nojournal: ""});
+conn = MongoRunner.runMongod({auth: ""});
admin = conn.getDB("admin");
// test that we can't run startSession unauthenticated when the server is running with --auth
diff --git a/jstests/noPassthrough/startup_recovery_truncates_oplog_holes_after_primary_crash.js b/jstests/noPassthrough/startup_recovery_truncates_oplog_holes_after_primary_crash.js
index 0a39d6fbacc..f2f6b208f10 100644
--- a/jstests/noPassthrough/startup_recovery_truncates_oplog_holes_after_primary_crash.js
+++ b/jstests/noPassthrough/startup_recovery_truncates_oplog_holes_after_primary_crash.js
@@ -6,8 +6,7 @@
* for no holes for writes with {j: true} write concern, and no confirmed writes will be truncated.
*
* @tags: [
- * # Replica sets using WT require journaling (startup error otherwise).
- * requires_journaling,
+ * requires_replication,
* # The primary is restarted and must retain its data.
* requires_persistence,
* ]
diff --git a/jstests/noPassthrough/sync_write.js b/jstests/noPassthrough/sync_write.js
index a76d86ec7f0..0ad1cb384b3 100644
--- a/jstests/noPassthrough/sync_write.js
+++ b/jstests/noPassthrough/sync_write.js
@@ -11,7 +11,7 @@
var dbpath = MongoRunner.dataPath + 'sync_write';
resetDbpath(dbpath);
-var mongodArgs = {dbpath: dbpath, noCleanData: true, journal: ''};
+var mongodArgs = {dbpath: dbpath, noCleanData: true};
// Start a mongod.
var conn = MongoRunner.runMongod(mongodArgs);
diff --git a/jstests/noPassthrough/temporarily_unavailable_error.js b/jstests/noPassthrough/temporarily_unavailable_error.js
index 968a257f816..628fa46e39d 100644
--- a/jstests/noPassthrough/temporarily_unavailable_error.js
+++ b/jstests/noPassthrough/temporarily_unavailable_error.js
@@ -4,7 +4,6 @@
*
* @tags: [
* # Exclude in-memory engine, rollbacks due to pinned cache content rely on eviction.
- * requires_journaling,
* requires_persistence,
* requires_replication,
* requires_wiredtiger,
diff --git a/jstests/noPassthrough/timeseries_idle_buckets.js b/jstests/noPassthrough/timeseries_idle_buckets.js
index 651e7a446f2..06a2c0941ba 100644
--- a/jstests/noPassthrough/timeseries_idle_buckets.js
+++ b/jstests/noPassthrough/timeseries_idle_buckets.js
@@ -2,8 +2,6 @@
* Tests that idle buckets are removed when the bucket catalog's memory threshold is reached.
*
* @tags: [
- * # Replication requires journaling support so this tag also implies exclusion from
- * # --nojournal test configurations.
* requires_replication,
* ]
*/
diff --git a/jstests/noPassthrough/unsupported_change_stream_deployments.js b/jstests/noPassthrough/unsupported_change_stream_deployments.js
index 2dcb2b46dcb..933890e4011 100644
--- a/jstests/noPassthrough/unsupported_change_stream_deployments.js
+++ b/jstests/noPassthrough/unsupported_change_stream_deployments.js
@@ -5,14 +5,6 @@
"use strict";
load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
-// Skip this test if running with --nojournal and WiredTiger.
-if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
-}
-
function assertChangeStreamNotSupportedOnConnection(conn) {
const notReplicaSetErrorCode = 40573;
assertErrorCode(conn.getDB("test").non_existent, [{$changeStream: {}}], notReplicaSetErrorCode);
diff --git a/jstests/noPassthrough/verify_session_cache_updates.js b/jstests/noPassthrough/verify_session_cache_updates.js
index 6303931ef60..30ae9de1459 100644
--- a/jstests/noPassthrough/verify_session_cache_updates.js
+++ b/jstests/noPassthrough/verify_session_cache_updates.js
@@ -66,7 +66,7 @@ function runTest(conn) {
}
{
- var mongod = MongoRunner.runMongod({nojournal: ""});
+ var mongod = MongoRunner.runMongod();
runTest(mongod);
MongoRunner.stopMongod(mongod);
}
diff --git a/jstests/noPassthrough/wt_change_log_compressor.js b/jstests/noPassthrough/wt_change_log_compressor.js
new file mode 100644
index 00000000000..af77f1191c9
--- /dev/null
+++ b/jstests/noPassthrough/wt_change_log_compressor.js
@@ -0,0 +1,42 @@
+/**
+ * Test that WT log compressor settings can be changed between clean shutdowns.
+ *
+ * @tags: [requires_wiredtiger, requires_replication, requires_persistence]
+ */
+
+(function() {
+'use strict';
+
+const initOpt = {
+ wiredTigerEngineConfigString: 'log=(compressor=snappy)'
+};
+
+const replSetTest = new ReplSetTest({nodes: 2, nodeOptions: initOpt});
+replSetTest.startSet();
+replSetTest.initiate();
+
+// Perform a write larger than 128 bytes to ensure there is a compressed journal entry. Not
+// strictly necessary, as the oplog is filled with system entries just by starting up the replset,
+// but kept in case something changes in the future. A 10kb value is used in case this threshold is modified.
+const testDB = replSetTest.getPrimary().getDB('test');
+testDB.coll.insert({a: 'a'.repeat(10 * 1024)});
+
+const restartNodeWithOpts = function(node, opts) {
+ // Mongod clean shutdown by SIGINT.
+ replSetTest.stop(node, 2, {allowedExitCode: MongoRunner.EXIT_CLEAN});
+ replSetTest.start(node, opts);
+};
+
+const optChangeJournal = {
+ noCleanData: true, // Keep dbpath data from previous start.
+ wiredTigerEngineConfigString: 'log=(compressor=zstd)'
+};
+
+// Restart nodes in a rolling fashion, none should crash due to decompression errors.
+for (const node of replSetTest.getSecondaries()) {
+ restartNodeWithOpts(node, optChangeJournal);
+}
+restartNodeWithOpts(replSetTest.getPrimary(), optChangeJournal);
+
+replSetTest.stopSet();
+})();
diff --git a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
index 14f1aad2480..6facafc60b1 100644
--- a/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
+++ b/jstests/noPassthrough/wt_delayed_secondary_read_concern_majority.js
@@ -14,14 +14,6 @@
load("jstests/replsets/rslib.js");
-// Skip this test if running with --nojournal and WiredTiger.
-if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
-}
-
// Skip db hash check because delayed secondary will not catch up to primary.
TestData.skipCheckDBHashes = true;
diff --git a/jstests/noPassthrough/wt_nojournal_skip_recovery.js b/jstests/noPassthrough/wt_nojournal_skip_recovery.js
deleted file mode 100644
index 0228c859eb0..00000000000
--- a/jstests/noPassthrough/wt_nojournal_skip_recovery.js
+++ /dev/null
@@ -1,99 +0,0 @@
-/**
- * Tests that having journaled write operations since the last checkpoint triggers an error when
- * --wiredTigerEngineConfigString log=(recover=error) is specified in combination with --nojournal.
- * Also verifies that deleting the journal/ directory allows those operations to safely be ignored.
- */
-(function() {
-'use strict';
-
-// Skip this test if not running with the "wiredTiger" storage engine.
-if (jsTest.options().storageEngine && jsTest.options().storageEngine !== 'wiredTiger') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
- return;
-}
-
-// Skip this test until we figure out why journaled writes are replayed after last checkpoint.
-TestData.skipCollectionAndIndexValidation = true;
-
-var dbpath = MongoRunner.dataPath + 'wt_nojournal_skip_recovery';
-resetDbpath(dbpath);
-
-// Start a mongod with journaling enabled.
-var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- journal: '',
- // Wait an hour between checkpoints to ensure one isn't created after the fsync command is
- // executed and before the mongod is terminated. This is necessary to ensure that exactly 90
- // documents with the 'journaled' field exist in the collection.
- wiredTigerEngineConfigString: 'checkpoint=(wait=3600)'
-});
-assert.neq(null, conn, 'mongod was unable to start up');
-
-// Execute unjournaled inserts, but periodically do a journaled insert. Triggers a checkpoint
-// prior to the mongod being terminated.
-var awaitShell = startParallelShell(function() {
- for (let loopNum = 1; true; ++loopNum) {
- var bulk = db.nojournal.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; ++i) {
- bulk.insert({unjournaled: i});
- }
- assert.commandWorked(bulk.execute({j: false}));
- assert.commandWorked(db.nojournal.insert({journaled: loopNum}, {writeConcern: {j: true}}));
-
- // Create a checkpoint slightly before the mongod is terminated.
- if (loopNum === 90) {
- assert.commandWorked(db.adminCommand({fsync: 1}));
- }
- }
-}, conn.port);
-
-// After some journaled write operations have been performed against the mongod, send a SIGKILL
-// to the process to trigger an unclean shutdown.
-assert.soon(
- function() {
- var count = conn.getDB('test').nojournal.count({journaled: {$exists: true}});
- if (count >= 100) {
- MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- return true;
- }
- return false;
- },
- 'the parallel shell did not perform at least 100 journaled inserts',
- 5 * 60 * 1000 /*timeout ms*/);
-
-var exitCode = awaitShell({checkExitSuccess: false});
-assert.neq(0, exitCode, 'expected shell to exit abnormally due to mongod being terminated');
-
-// Restart the mongod with journaling disabled, but configure it to error if the database needs
-// recovery.
-assert.throws(() => MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- nojournal: '',
- wiredTigerEngineConfigString: 'log=(recover=error)',
-}),
- [],
- 'mongod should not have started up because it requires recovery');
-
-// Remove the journal files.
-assert(removeFile(dbpath + '/journal'), 'failed to remove the journal directory');
-
-// Restart the mongod with journaling disabled again.
-conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- nojournal: '',
- wiredTigerEngineConfigString: 'log=(recover=error)',
-});
-assert.neq(null, conn, 'mongod was unable to start up after removing the journal directory');
-
-var count = conn.getDB('test').nojournal.count({journaled: {$exists: true}});
-assert.lte(90, count, 'missing documents that were present in the last checkpoint');
-assert.gte(90,
- count,
- 'journaled write operations since the last checkpoint should not have been' +
- ' replayed');
-
-MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/noPassthrough/wt_nojournal_toggle.js b/jstests/noPassthrough/wt_nojournal_toggle.js
deleted file mode 100644
index 41e286b39fc..00000000000
--- a/jstests/noPassthrough/wt_nojournal_toggle.js
+++ /dev/null
@@ -1,123 +0,0 @@
-/**
- * Tests that journaled write operations that have occurred since the last checkpoint are replayed
- * when the mongod is killed and restarted with --nojournal.
- */
-(function() {
-'use strict';
-
-// Skip this test if not running with the "wiredTiger" storage engine.
-if (jsTest.options().storageEngine && jsTest.options().storageEngine !== 'wiredTiger') {
- jsTest.log('Skipping test because storageEngine is not "wiredTiger"');
- return;
-}
-
-// Returns a function that primarily executes unjournaled inserts, but periodically does a
-// journaled insert. If 'checkpoint' is true, then the fsync command is run to create a
-// checkpoint prior to the mongod being terminated.
-function insertFunctionFactory(checkpoint) {
- var insertFunction = function() {
- for (var iter = 0; iter < 1000; ++iter) {
- var bulk = db.nojournal.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; ++i) {
- bulk.insert({unjournaled: i});
- }
- assert.commandWorked(bulk.execute({j: false}));
- assert.commandWorked(db.nojournal.insert({journaled: iter}, {writeConcern: {j: true}}));
- if (__checkpoint_template_placeholder__ && iter === 50) {
- assert.commandWorked(db.adminCommand({fsync: 1}));
- }
- }
- };
-
- return '(' +
- insertFunction.toString().replace('__checkpoint_template_placeholder__',
- checkpoint.toString()) +
- ')();';
-}
-
-function runTest(options) {
- var dbpath = MongoRunner.dataPath + 'wt_nojournal_toggle';
- resetDbpath(dbpath);
-
- // Start a mongod with journaling enabled.
- var conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- journal: '',
- });
- assert.neq(null, conn, 'mongod was unable to start up');
-
- // Run a mixture of journaled and unjournaled write operations against the mongod.
- var awaitShell = startParallelShell(insertFunctionFactory(options.checkpoint), conn.port);
-
- // After some journaled write operations have been performed against the mongod, send a
- // SIGKILL to the process to trigger an unclean shutdown.
- assert.soon(function() {
- var testDB = conn.getDB('test');
- var count = testDB.nojournal.count({journaled: {$exists: true}});
- if (count >= 100) {
- // We saw 100 journaled inserts, but visibility does not guarantee durability, so
- // do an extra journaled write to make all visible commits durable, before killing
- // the mongod.
- assert.commandWorked(testDB.nojournal.insert({final: true}, {writeConcern: {j: true}}));
- MongoRunner.stopMongod(conn, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL});
- return true;
- }
- return false;
- }, 'the parallel shell did not perform at least 100 journaled inserts');
-
- var exitCode = awaitShell({checkExitSuccess: false});
- assert.neq(0, exitCode, 'expected shell to exit abnormally due to mongod being terminated');
-
- // Restart the mongod with journaling disabled.
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- nojournal: '',
- });
- assert.neq(null, conn, 'mongod was unable to restart after receiving a SIGKILL');
-
- var testDB = conn.getDB('test');
- assert.eq(1, testDB.nojournal.count({final: true}), 'final journaled write was not found');
- assert.lte(100,
- testDB.nojournal.count({journaled: {$exists: true}}),
- 'journaled write operations since the last checkpoint were not replayed');
-
- var initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
- assert.commandWorked(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
- assert.eq(initialNumLogWrites,
- testDB.serverStatus().wiredTiger.log['log write operations'],
- 'journaling is still enabled even though --nojournal was specified');
-
- MongoRunner.stopMongod(conn);
-
- // Restart the mongod with journaling enabled.
- conn = MongoRunner.runMongod({
- dbpath: dbpath,
- noCleanData: true,
- journal: '',
- });
- assert.neq(null, conn, 'mongod was unable to start up after re-enabling journaling');
-
- // Change the database object to connect to the restarted mongod.
- testDB = conn.getDB('test');
- initialNumLogWrites = testDB.serverStatus().wiredTiger.log['log write operations'];
-
- assert.commandWorked(testDB.nojournal.insert({a: 1}, {writeConcern: {fsync: true}}));
- assert.lt(initialNumLogWrites,
- testDB.serverStatus().wiredTiger.log['log write operations'],
- 'journaling is still disabled even though --journal was specified');
-
- MongoRunner.stopMongod(conn);
-}
-
-// Operations from the journal should be replayed even when the mongod is terminated before
-// anything is written to disk.
-jsTest.log('Running the test without ever creating a checkpoint');
-runTest({checkpoint: false});
-
-// Repeat the test again, but ensure that some data is written to disk before the mongod is
-// terminated.
-jsTest.log('Creating a checkpoint part-way through running the test');
-runTest({checkpoint: true});
-})();
diff --git a/jstests/noPassthrough/wt_unclean_shutdown.js b/jstests/noPassthrough/wt_unclean_shutdown.js
index b7cbf8efa0a..8545f208e19 100644
--- a/jstests/noPassthrough/wt_unclean_shutdown.js
+++ b/jstests/noPassthrough/wt_unclean_shutdown.js
@@ -4,7 +4,7 @@
* restart and recovery verify that all expected records inserted are there and no records in the
* middle of the data set are lost.
*
- * @tags: [requires_wiredtiger, requires_journaling]
+ * @tags: [requires_wiredtiger]
*/
load('jstests/libs/parallelTester.js'); // For Thread
diff --git a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
index 8830ff4bdc7..548bfd8c394 100644
--- a/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
+++ b/jstests/noPassthroughWithMongod/bench_test_crud_commands.js
@@ -73,10 +73,7 @@ function testWriteConcern() {
testInsert(docs, {"w": "majority"});
testInsert(docs, {"w": 1, "j": false});
- var storageEnginesWithoutJournaling = new Set(["inMemory"]);
- var runningWithoutJournaling = TestData.noJournal ||
- storageEnginesWithoutJournaling.has(db.serverStatus().storageEngine.name);
- if (!runningWithoutJournaling) {
+ if (jsTestOptions().storageEngine != "inMemory") {
// Only test journaled writes if the server actually supports them.
testInsert(docs, {"j": true});
}
diff --git a/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js b/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
index 25083f1484c..3a4e48d9dbc 100644
--- a/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
+++ b/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js
@@ -2,7 +2,7 @@
* This file tests that commands that should accept a writeConcern on a standalone can accept one.
* This does not test that writes with j: true are actually made durable or that if j: true fails
* that there is a writeConcern error.
- * @tags: [requires_journaling]
+ * @tags: [requires_persistence]
*/
(function() {
diff --git a/jstests/replsets/batch_write_command_wc.js b/jstests/replsets/batch_write_command_wc.js
index 4f00ef61373..efb03a60a34 100644
--- a/jstests/replsets/batch_write_command_wc.js
+++ b/jstests/replsets/batch_write_command_wc.js
@@ -1,11 +1,10 @@
// Tests write-concern-related batch write protocol functionality
(function() {
-// Skip this test if running with the "wiredTiger" storage engine, since it requires
-// using 'nojournal' in a replica set, which is not supported when using WT.
-if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
- // WT is currently the default engine so it is used when 'storageEngine' is not set.
- jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
+// Skip this test when running with storage engines other than inMemory, as the test relies on
+// journaling not being active.
+if (jsTest.options().storageEngine !== "inMemory") {
+ jsTest.log("Skipping test because it is only applicable for the inMemory storage engine");
return;
}
@@ -19,7 +18,7 @@ jsTest.log("Starting no journal/repl set tests...");
// Start a single-node replica set with no journal
// Allows testing immediate write concern failures and wc application failures
var rst = new ReplSetTest({nodes: 2});
-rst.startSet({nojournal: ""});
+rst.startSet();
rst.initiate();
var mongod = rst.getPrimary();
var coll = mongod.getCollection("test.batch_write_command_wc");
diff --git a/jstests/replsets/bulk_api_wc.js b/jstests/replsets/bulk_api_wc.js
index 8bc52ef04ff..dacd99d152e 100644
--- a/jstests/replsets/bulk_api_wc.js
+++ b/jstests/replsets/bulk_api_wc.js
@@ -3,18 +3,17 @@
jsTest.log("Starting bulk api write concern tests...");
-// Skip this test if running with the "wiredTiger" storage engine, since it requires
-// using 'nojournal' in a replica set, which is not supported when using WT.
-if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
- // WT is currently the default engine so it is used when 'storageEngine' is not set.
- jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
+// Skip this test when running with storage engines other than inMemory, as the test relies on
+// journaling not being active.
+if (jsTest.options().storageEngine !== "inMemory") {
+ jsTest.log("Skipping test because it is only applicable for the inMemory storage engine");
return;
}
// Start a 2-node replica set with no journal.
// Allows testing immediate write concern failures and wc application failures
var rst = new ReplSetTest({nodes: 2});
-rst.startSet({nojournal: ""});
+rst.startSet();
rst.initiate();
var mongod = rst.getPrimary();
var coll = mongod.getCollection("test.bulk_api_wc");
diff --git a/jstests/replsets/find_and_modify_wc.js b/jstests/replsets/find_and_modify_wc.js
index 236ddad1afb..a7800d58aa7 100644
--- a/jstests/replsets/find_and_modify_wc.js
+++ b/jstests/replsets/find_and_modify_wc.js
@@ -4,17 +4,16 @@
(function() {
'use strict';
-// Skip this test if running with the "wiredTiger" storage engine, since it requires
-// using 'nojournal' in a replica set, which is not supported when using WT.
-if (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger") {
- // WT is currently the default engine so it is used when 'storageEngine' is not set.
- jsTest.log("Skipping test because it is not applicable for the wiredTiger storage engine");
+// Skip this test when running with storage engines other than inMemory, as the test relies on
+// journaling not being active.
+if (jsTest.options().storageEngine !== "inMemory") {
+ jsTest.log("Skipping test because it is only applicable for the inMemory storage engine");
return;
}
var nodeCount = 3;
var rst = new ReplSetTest({nodes: nodeCount});
-rst.startSet({nojournal: ""});
+rst.startSet();
rst.initiate();
var primary = rst.getPrimary();
diff --git a/jstests/replsets/rollback_dup_ids.js b/jstests/replsets/rollback_dup_ids.js
index f8da13f9446..75297118d80 100644
--- a/jstests/replsets/rollback_dup_ids.js
+++ b/jstests/replsets/rollback_dup_ids.js
@@ -1,6 +1,6 @@
// When run with --majorityReadConcern=off, this test reproduces the bug described in SERVER-38925,
// where rolling back a delete followed by a restart produces documents with duplicate _id.
-// @tags: [requires_persistence, requires_journaling]
+// @tags: [requires_persistence]
(function() {
"use strict";
@@ -40,4 +40,4 @@ rollbackTest.restartNode(0, 9);
// Check the replica set.
rollbackTest.stop();
-}()); \ No newline at end of file
+}());
diff --git a/jstests/replsets/standalone_replication_recovery_idempotent.js b/jstests/replsets/standalone_replication_recovery_idempotent.js
index 05d807952de..79ae863afa6 100644
--- a/jstests/replsets/standalone_replication_recovery_idempotent.js
+++ b/jstests/replsets/standalone_replication_recovery_idempotent.js
@@ -3,7 +3,7 @@
* idempotent.
*
* This test only makes sense for storage engines that support recover to stable timestamp.
- * @tags: [requires_persistence, requires_journaling, requires_replication,
+ * @tags: [requires_persistence, requires_replication,
* requires_majority_read_concern, uses_transactions, uses_prepare_transaction,
* # Restarting as a standalone is not supported in multiversion tests.
* multiversion_incompatible]
diff --git a/jstests/replsets/standalone_replication_recovery_prepare_only.js b/jstests/replsets/standalone_replication_recovery_prepare_only.js
index 7350bea9513..37a1ae9df36 100644
--- a/jstests/replsets/standalone_replication_recovery_prepare_only.js
+++ b/jstests/replsets/standalone_replication_recovery_prepare_only.js
@@ -3,7 +3,7 @@
* 'recoverFromOplogAsStandalone' flag.
*
* This test only makes sense for storage engines that support recover to stable timestamp.
- * @tags: [requires_persistence, requires_journaling, requires_replication,
+ * @tags: [requires_persistence, requires_replication,
* requires_majority_read_concern, uses_transactions, uses_prepare_transaction]
*/
(function() {
diff --git a/jstests/replsets/standalone_replication_recovery_prepare_with_commit.js b/jstests/replsets/standalone_replication_recovery_prepare_with_commit.js
index 19813c1ce77..9895ea5a040 100644
--- a/jstests/replsets/standalone_replication_recovery_prepare_with_commit.js
+++ b/jstests/replsets/standalone_replication_recovery_prepare_with_commit.js
@@ -3,7 +3,7 @@
* 'recoverFromOplogAsStandalone' flag.
*
* This test only makes sense for storage engines that support recover to stable timestamp.
- * @tags: [requires_persistence, requires_journaling, requires_replication,
+ * @tags: [requires_persistence, requires_replication,
* requires_majority_read_concern, uses_transactions, uses_prepare_transaction]
*/
(function() {
diff --git a/jstests/replsets/standalone_replication_recovery_relaxes_index_constraints.js b/jstests/replsets/standalone_replication_recovery_relaxes_index_constraints.js
index 898c8b35d8e..3b8de49b3bd 100644
--- a/jstests/replsets/standalone_replication_recovery_relaxes_index_constraints.js
+++ b/jstests/replsets/standalone_replication_recovery_relaxes_index_constraints.js
@@ -4,7 +4,7 @@
* never fail without a bug.
*
* This test only makes sense for storage engines that support recover to stable timestamp.
- * @tags: [requires_persistence, requires_journaling, requires_replication,
+ * @tags: [requires_persistence, requires_replication,
* requires_majority_read_concern,
* # Restarting as a standalone is not supported in multiversion tests.
* multiversion_incompatible]
diff --git a/jstests/replsets/startup_recovery_for_restore.js b/jstests/replsets/startup_recovery_for_restore.js
index bb17808bc0e..2b629c7dcfa 100644
--- a/jstests/replsets/startup_recovery_for_restore.js
+++ b/jstests/replsets/startup_recovery_for_restore.js
@@ -3,7 +3,7 @@
* "for restore" mode, but not read from older points-in-time on the recovered node.
*
* This test only makes sense for storage engines that support recover to stable timestamp.
- * @tags: [requires_persistence, requires_journaling, requires_replication,
+ * @tags: [requires_persistence, requires_replication,
* requires_majority_read_concern, uses_transactions, uses_prepare_transaction,
* # We don't expect to do this while upgrading.
* multiversion_incompatible]
diff --git a/jstests/replsets/startup_recovery_for_restore_needs_rollback.js b/jstests/replsets/startup_recovery_for_restore_needs_rollback.js
index 45c7551e50b..d6c65e67e04 100644
--- a/jstests/replsets/startup_recovery_for_restore_needs_rollback.js
+++ b/jstests/replsets/startup_recovery_for_restore_needs_rollback.js
@@ -4,7 +4,7 @@
* than attempt to use the node.
*
* This test only makes sense for storage engines that support recover to stable timestamp.
- * @tags: [requires_persistence, requires_journaling, requires_replication,
+ * @tags: [requires_persistence, requires_replication,
* requires_majority_read_concern, uses_transactions, uses_prepare_transaction,
* # We don't expect to do this while upgrading.
* multiversion_incompatible]
diff --git a/jstests/replsets/startup_recovery_for_restore_restarts.js b/jstests/replsets/startup_recovery_for_restore_restarts.js
index 34868d4a220..418af92debb 100644
--- a/jstests/replsets/startup_recovery_for_restore_restarts.js
+++ b/jstests/replsets/startup_recovery_for_restore_restarts.js
@@ -4,7 +4,7 @@
* we can do so even after we crash in the middle of an attempt to restore.
*
* This test only makes sense for storage engines that support recover to stable timestamp.
- * @tags: [requires_wiredtiger, requires_persistence, requires_journaling, requires_replication,
+ * @tags: [requires_wiredtiger, requires_persistence, requires_replication,
* requires_majority_read_concern, uses_transactions, uses_prepare_transaction,
* # We don't expect to do this while upgrading.
* multiversion_incompatible]
diff --git a/jstests/replsets/tenant_migration_shard_merge_import_write_conflict_retry.js b/jstests/replsets/tenant_migration_shard_merge_import_write_conflict_retry.js
index c4ca54443af..cded13b4ddf 100644
--- a/jstests/replsets/tenant_migration_shard_merge_import_write_conflict_retry.js
+++ b/jstests/replsets/tenant_migration_shard_merge_import_write_conflict_retry.js
@@ -9,7 +9,6 @@
* incompatible_with_macos,
* incompatible_with_windows_tls,
* requires_fcv_52,
- * requires_journaling,
* requires_replication,
* requires_persistence,
* requires_wiredtiger,
diff --git a/jstests/selinux/core.js b/jstests/selinux/core.js
index cc12e46c122..c09f6a194d2 100644
--- a/jstests/selinux/core.js
+++ b/jstests/selinux/core.js
@@ -8,7 +8,7 @@ class TestDefinition extends SelinuxBaseTest {
return {
"systemLog":
{"destination": "file", "logAppend": true, "path": "/var/log/mongodb/mongod.log"},
- "storage": {"dbPath": "/var/lib/mongo", "journal": {"enabled": true}},
+ "storage": {"dbPath": "/var/lib/mongo"},
"processManagement": {
"fork": true,
"pidFilePath": "/var/run/mongodb/mongod.pid",
diff --git a/jstests/serverless/rd.js b/jstests/serverless/rd.js
index 9662dd7f2aa..572c49f75e9 100644
--- a/jstests/serverless/rd.js
+++ b/jstests/serverless/rd.js
@@ -6,7 +6,7 @@ class Rd {
constructor() {
jsTest.log("Going to create and start Rd.");
this.rs = new ReplSetTest({name: "Rd", nodes: 3, useHostName: true});
- this.rs.startSet({journal: "", storageEngine: 'wiredTiger'});
+ this.rs.startSet({storageEngine: 'wiredTiger'});
this.rs.initiate();
jsTest.log("Going to create connection with Rd.");
diff --git a/jstests/serverless/serverlesstest.js b/jstests/serverless/serverlesstest.js
index 448a854e890..3d53e860ef2 100644
--- a/jstests/serverless/serverlesstest.js
+++ b/jstests/serverless/serverlesstest.js
@@ -23,7 +23,7 @@ class ServerlessTest {
jsTest.log("Going to create and start config server.");
this.configRS = new ReplSetTest({name: "configRS", nodes: 3, useHostName: true});
- this.configRS.startSet({configsvr: '', journal: "", storageEngine: 'wiredTiger'});
+ this.configRS.startSet({configsvr: '', storageEngine: 'wiredTiger'});
jsTest.log("Initiate config server before starting mongoq.");
let replConfig = this.configRS.getReplSetConfig();
diff --git a/jstests/sharding/config_rs_change.js b/jstests/sharding/config_rs_change.js
index 34cdcab4661..46a4897996c 100644
--- a/jstests/sharding/config_rs_change.js
+++ b/jstests/sharding/config_rs_change.js
@@ -3,7 +3,7 @@
// of the config replset config during startup.
var configRS = new ReplSetTest({name: "configRS", nodes: 1, useHostName: true});
-configRS.startSet({configsvr: '', journal: "", storageEngine: 'wiredTiger'});
+configRS.startSet({configsvr: '', storageEngine: 'wiredTiger'});
var replConfig = configRS.getReplSetConfig();
replConfig.configsvr = true;
configRS.initiate(replConfig);
diff --git a/jstests/sharding/empty_cluster_init.js b/jstests/sharding/empty_cluster_init.js
index 612cabd1df1..c35c60191d4 100644
--- a/jstests/sharding/empty_cluster_init.js
+++ b/jstests/sharding/empty_cluster_init.js
@@ -9,7 +9,7 @@
//
var configRS = new ReplSetTest({name: "configRS", nodes: 3, useHostName: true});
-configRS.startSet({configsvr: '', journal: "", storageEngine: 'wiredTiger'});
+configRS.startSet({configsvr: '', storageEngine: 'wiredTiger'});
var replConfig = configRS.getReplSetConfig();
replConfig.configsvr = true;
configRS.initiate(replConfig);
diff --git a/jstests/sharding/mongos_wait_csrs_initiate.js b/jstests/sharding/mongos_wait_csrs_initiate.js
index 2b20d2f49e4..d85266037de 100644
--- a/jstests/sharding/mongos_wait_csrs_initiate.js
+++ b/jstests/sharding/mongos_wait_csrs_initiate.js
@@ -2,7 +2,7 @@
// @tags: [multiversion_incompatible]
var configRS = new ReplSetTest({name: "configRS", nodes: 1, useHostName: true});
-configRS.startSet({configsvr: '', journal: "", storageEngine: 'wiredTiger'});
+configRS.startSet({configsvr: '', storageEngine: 'wiredTiger'});
var replConfig = configRS.getReplSetConfig();
replConfig.configsvr = true;
var mongos = MongoRunner.runMongos({configdb: configRS.getURL(), waitForConnect: false});
@@ -39,4 +39,4 @@ assert.soon(
jsTestLog("got mongos");
assert.commandWorked(mongos2.getDB('admin').runCommand('serverStatus'));
configRS.stopSet();
-MongoRunner.stopMongos(mongos); \ No newline at end of file
+MongoRunner.stopMongos(mongos);
diff --git a/jstests/sharding/server37750.js b/jstests/sharding/server37750.js
index 902c427c292..b87eaffad5a 100644
--- a/jstests/sharding/server37750.js
+++ b/jstests/sharding/server37750.js
@@ -2,7 +2,7 @@
* Confirms that a sharded $sample which employs the DSSampleFromRandomCursor optimization is
* capable of yielding.
*
- * @tags: [assumes_read_concern_unchanged, do_not_wrap_aggregations_in_facets, requires_journaling,
+ * @tags: [assumes_read_concern_unchanged, do_not_wrap_aggregations_in_facets,
* requires_sharding]
*/
(function() {
@@ -77,4 +77,4 @@ assert.eq(sampleCursor.toArray().length, 3);
// Confirm that the parallel shell completes successfully, and tear down the cluster.
awaitShell();
st.stop();
-})(); \ No newline at end of file
+})();
diff --git a/jstests/sharding/shard_aware_init.js b/jstests/sharding/shard_aware_init.js
index 20d15d07f6c..92533e422f8 100644
--- a/jstests/sharding/shard_aware_init.js
+++ b/jstests/sharding/shard_aware_init.js
@@ -3,7 +3,7 @@
* to primary (for replica set nodes).
* Note: test will deliberately cause a mongod instance to terminate abruptly and mongod instance
* without journaling will complain about unclean shutdown.
- * @tags: [requires_persistence, requires_journaling]
+ * @tags: [requires_persistence]
*/
(function() {
diff --git a/jstests/sharding/shard_identity_rollback.js b/jstests/sharding/shard_identity_rollback.js
index ba2d43e5acc..9b6d4380c8b 100644
--- a/jstests/sharding/shard_identity_rollback.js
+++ b/jstests/sharding/shard_identity_rollback.js
@@ -1,7 +1,7 @@
/**
* Tests that rolling back the insertion of the shardIdentity document on a shard causes the node
* rolling it back to shut down.
- * @tags: [multiversion_incompatible, requires_persistence, requires_journaling]
+ * @tags: [multiversion_incompatible, requires_persistence]
*/
(function() {
diff --git a/jstests/sharding/sharding_options.js b/jstests/sharding/sharding_options.js
index 1ba6bc5844a..addc5d2901d 100644
--- a/jstests/sharding/sharding_options.js
+++ b/jstests/sharding/sharding_options.js
@@ -29,13 +29,9 @@ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_paranoia.jso
// Sharding Role
jsTest.log("Testing \"configsvr\" command line option");
var expectedResult = {
- "parsed": {
- "sharding": {"clusterRole": "configsvr"},
- "replication": {"replSet": "dummy"},
- "storage": {"journal": {"enabled": true}}
- }
+ "parsed": {"sharding": {"clusterRole": "configsvr"}, "replication": {"replSet": "dummy"}}
};
-testGetCmdLineOptsMongod({configsvr: "", journal: "", replSet: "dummy"}, expectedResult);
+testGetCmdLineOptsMongod({configsvr: "", replSet: "dummy"}, expectedResult);
jsTest.log("Testing \"shardsvr\" command line option");
expectedResult = {
diff --git a/jstests/slow1/conc_update.js b/jstests/slow1/conc_update.js
index 0ca9ea81bea..7fa7b270360 100644
--- a/jstests/slow1/conc_update.js
+++ b/jstests/slow1/conc_update.js
@@ -1,7 +1,7 @@
(function() {
"use strict";
-const conn = MongoRunner.runMongod({nojournal: ""});
+const conn = MongoRunner.runMongod();
assert.neq(null, conn, "mongod was unable to start up");
db = conn.getDB("concurrency");
db.dropDatabase();
diff --git a/jstests/slow1/initial_sync_many_dbs.js b/jstests/slow1/initial_sync_many_dbs.js
index c042c8190d8..f644db39822 100644
--- a/jstests/slow1/initial_sync_many_dbs.js
+++ b/jstests/slow1/initial_sync_many_dbs.js
@@ -3,13 +3,6 @@
*/
(function() {
-// Skip this test if running with --nojournal and WiredTiger.
-if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
-}
var name = 'initial_sync_many_dbs';
var num_dbs = 32;
diff --git a/jstests/slow1/replsets_priority1.js b/jstests/slow1/replsets_priority1.js
index dad18abc6ba..ed72b92e960 100644
--- a/jstests/slow1/replsets_priority1.js
+++ b/jstests/slow1/replsets_priority1.js
@@ -5,14 +5,6 @@
"use strict";
-// Skip this test if running with --nojournal and WiredTiger.
-if (jsTest.options().noJournal &&
- (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger")) {
- print("Skipping test because running WiredTiger without journaling isn't a valid" +
- " replica set configuration");
- return;
-}
-
load("jstests/replsets/rslib.js");
var rs = new ReplSetTest({name: 'testSet', nodes: 3, nodeOptions: {verbose: 2}});
diff --git a/jstests/ssl/libs/ssl_helpers.js b/jstests/ssl/libs/ssl_helpers.js
index 44d5462f4ac..0ead420eab2 100644
--- a/jstests/ssl/libs/ssl_helpers.js
+++ b/jstests/ssl/libs/ssl_helpers.js
@@ -130,7 +130,7 @@ function mixedShardTest(options1, options2, shouldSucceed) {
// authentication, so in this test we must make the choice explicitly, based on the global
// test options.
let wcMajorityJournalDefault;
- if (jsTestOptions().noJournal || jsTestOptions().storageEngine == "inMemory") {
+ if (jsTestOptions().storageEngine == "inMemory") {
wcMajorityJournalDefault = false;
} else {
wcMajorityJournalDefault = true;
diff --git a/jstests/ssl/ssl_without_ca.js b/jstests/ssl/ssl_without_ca.js
index 9b0a62a5250..6021e4e6933 100644
--- a/jstests/ssl/ssl_without_ca.js
+++ b/jstests/ssl/ssl_without_ca.js
@@ -52,7 +52,6 @@ var rstOptions = {
};
var startOptions = {
// Ensure that journaling is always enabled for config servers.
- journal: "",
configsvr: "",
storageEngine: "wiredTiger",
sslMode: 'allowSSL',
diff --git a/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js b/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js
index 830b353481c..a3df6b850ad 100644
--- a/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js
+++ b/jstests/sslSpecial/upgrade_to_x509_ssl_nossl.js
@@ -19,8 +19,7 @@ load("jstests/ssl/libs/ssl_helpers.js");
// cluster authentication. Choosing the default value for wcMajorityJournalDefault in
// ReplSetTest cannot be done automatically without the shell performing such authentication, so
// in this test we must make the choice explicitly, based on the global test options.
-const wcMajorityJournalDefault =
- !jsTestOptions().noJournal && jsTestOptions().storageEngine != "inMemory";
+const wcMajorityJournalDefault = jsTestOptions().storageEngine != "inMemory";
const opts = {
sslMode: "disabled",
diff --git a/jstests/ssl_x509/initial_sync1_x509.js b/jstests/ssl_x509/initial_sync1_x509.js
index 54366303f76..85b00b1b806 100644
--- a/jstests/ssl_x509/initial_sync1_x509.js
+++ b/jstests/ssl_x509/initial_sync1_x509.js
@@ -16,7 +16,7 @@ function runInitialSyncTest() {
// ReplSetTest cannot be done automatically without the shell performing such authentication, so
// in this test we must make the choice explicitly, based on the global test options.
var wcMajorityJournalDefault;
- if (jsTestOptions().noJournal || jsTestOptions().storageEngine == "inMemory") {
+ if (jsTestOptions().storageEngine == "inMemory") {
wcMajorityJournalDefault = false;
} else {
wcMajorityJournalDefault = true;
diff --git a/jstests/ssl_x509/shell_x509_system_user.js b/jstests/ssl_x509/shell_x509_system_user.js
index adca45975e8..5fc8f7969cb 100644
--- a/jstests/ssl_x509/shell_x509_system_user.js
+++ b/jstests/ssl_x509/shell_x509_system_user.js
@@ -9,7 +9,7 @@
// ReplSetTest cannot be done automatically without the shell performing such authentication, so
// in this test we must make the choice explicitly, based on the global test options.
let wcMajorityJournalDefault;
-if (jsTestOptions().noJournal || jsTestOptions().storageEngine == "inMemory") {
+if (jsTestOptions().storageEngine == "inMemory") {
wcMajorityJournalDefault = false;
} else {
wcMajorityJournalDefault = true;
diff --git a/jstests/ssl_x509/upgrade_to_x509_ssl.js b/jstests/ssl_x509/upgrade_to_x509_ssl.js
index 4fe966685da..e4a7e828da4 100644
--- a/jstests/ssl_x509/upgrade_to_x509_ssl.js
+++ b/jstests/ssl_x509/upgrade_to_x509_ssl.js
@@ -24,7 +24,7 @@ load("jstests/ssl/libs/ssl_helpers.js");
// ReplSetTest cannot be done automatically without the shell performing such authentication, so
// in this test we must make the choice explicitly, based on the global test options.
var wcMajorityJournalDefault;
-if (jsTestOptions().noJournal || jsTestOptions().storageEngine == "inMemory") {
+if (jsTestOptions().storageEngine == "inMemory") {
wcMajorityJournalDefault = false;
} else {
wcMajorityJournalDefault = true;
diff --git a/jstests/watchdog/wd_journal_hang.js b/jstests/watchdog/wd_journal_hang.js
index 7f1ad896ac9..80b6e918eb4 100644
--- a/jstests/watchdog/wd_journal_hang.js
+++ b/jstests/watchdog/wd_journal_hang.js
@@ -1,5 +1,5 @@
// Storage Node Watchdog - validate watchdog monitors --dbpath /journal
-// @tags: [requires_journaling]
+// @tags: [requires_persistence]
//
load("jstests/watchdog/lib/wd_test_common.js");
diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp
index 954761086b3..4c32e81f948 100644
--- a/src/mongo/db/mongod_main.cpp
+++ b/src/mongo/db/mongod_main.cpp
@@ -475,17 +475,6 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) {
exitCleanly(EXIT_BADOPTIONS);
}
- // Disallow running WiredTiger with --nojournal in a replica set
- if (storageGlobalParams.engine == "wiredTiger" && !storageGlobalParams.dur &&
- replSettings.usingReplSets()) {
- LOGV2_ERROR(
- 20535,
- "Running wiredTiger without journaling in a replica set is not supported. Make sure "
- "you are not using --nojournal and that storage.journal.enabled is not set to "
- "'false'");
- exitCleanly(EXIT_BADOPTIONS);
- }
-
if (storageGlobalParams.repair && replSettings.usingReplSets()) {
LOGV2_ERROR(5019200,
"Cannot specify both repair and replSet at the same time (remove --replSet to "
diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp
index 48099fb01e5..b528a315b62 100644
--- a/src/mongo/db/mongod_options.cpp
+++ b/src/mongo/db/mongod_options.cpp
@@ -161,11 +161,6 @@ Status validateMongodOptions(const moe::Environment& params) {
return ret;
}
- if (params.count("nojournal") && params.count("storage.journal.enabled")) {
- return Status(ErrorCodes::BadValue,
- "Can't specify both --journal and --nojournal options.");
- }
-
#ifdef _WIN32
if (params.count("install") || params.count("reinstall")) {
if (params.count("storage.dbPath") &&
@@ -210,18 +205,6 @@ Status canonicalizeMongodOptions(moe::Environment* params) {
return ret;
}
- if (params->count("nojournal")) {
- Status ret =
- params->set("storage.journal.enabled", moe::Value(!(*params)["nojournal"].as<bool>()));
- if (!ret.isOK()) {
- return ret;
- }
- ret = params->remove("nojournal");
- if (!ret.isOK()) {
- return ret;
- }
- }
-
// "security.authorization" comes from the config file, so override it if "auth" is
// set since those come from the command line.
if (params->count("auth")) {
@@ -447,14 +430,7 @@ Status storeMongodOptions(const moe::Environment& params) {
storageGlobalParams.groupCollections = params["storage.groupCollections"].as<bool>();
}
- if (params.count("storage.journal.enabled")) {
- storageGlobalParams.dur = params["storage.journal.enabled"].as<bool>();
- }
-
if (params.count("storage.journal.commitIntervalMs")) {
- // don't check if dur is false here as many will just use the default, and will default
- // to off on win32. ie no point making life a little more complex by giving an error on
- // a dev environment.
auto journalCommitIntervalMs = params["storage.journal.commitIntervalMs"].as<int>();
storageGlobalParams.journalCommitIntervalMs.store(journalCommitIntervalMs);
if (journalCommitIntervalMs < 1 ||
@@ -643,12 +619,6 @@ Status storeMongodOptions(const moe::Environment& params) {
<< "enableMajorityReadConcern=false",
serverGlobalParams.enableMajorityReadConcern);
- // If we haven't explicitly specified a journal option, default journaling to true for
- // the config server role
- if (!params.count("storage.journal.enabled")) {
- storageGlobalParams.dur = true;
- }
-
if (!params.count("storage.dbPath")) {
storageGlobalParams.dbpath = storageGlobalParams.kDefaultConfigDbPath;
}
@@ -691,13 +661,6 @@ Status storeMongodOptions(const moe::Environment& params) {
}
#endif
- // Check if we are 32 bit and have not explicitly specified any journaling options
- if (sizeof(void*) == 4 && !params.count("storage.journal.enabled")) {
- LOGV2_WARNING(20880,
- "32-bit servers don't have journaling enabled by default. Please use "
- "--journal if you want durability");
- }
-
bool isClusterRoleShard = params.count("shardsvr");
bool isClusterRoleConfig = params.count("configsvr");
if (params.count("sharding.clusterRole")) {
diff --git a/src/mongo/db/mongod_options.h b/src/mongo/db/mongod_options.h
index be4d70256cb..a218680bda7 100644
--- a/src/mongo/db/mongod_options.h
+++ b/src/mongo/db/mongod_options.h
@@ -69,8 +69,7 @@ Status validateMongodOptions(const moe::Environment& params);
/**
* Canonicalize mongod options for the given environment.
*
- * For example, the options "dur", "nodur", "journal", "nojournal", and
- * "storage.journaling.enabled" should all be merged into "storage.journaling.enabled".
+ * For example, "nounixsocket" maps to "net.unixDomainSocket.enabled".
*/
Status canonicalizeMongodOptions(moe::Environment* params);
diff --git a/src/mongo/db/mongod_options_sharding.idl b/src/mongo/db/mongod_options_sharding.idl
index f8da615ed84..88213fe5498 100644
--- a/src/mongo/db/mongod_options_sharding.idl
+++ b/src/mongo/db/mongod_options_sharding.idl
@@ -40,7 +40,7 @@ configs:
description: 'Declare this is a config db of a cluster; default port 27019; default dir /data/configdb'
arg_vartype: Switch
source: [ cli, ini ]
- conflicts: [ shardsvr, nojournal ]
+ conflicts: [ shardsvr ]
shardsvr:
description: 'Declare this is a shard db of a cluster; default port 27018'
arg_vartype: Switch
diff --git a/src/mongo/db/mongod_options_storage.idl b/src/mongo/db/mongod_options_storage.idl
index 59d4d0cd8e7..b1c3e041ad4 100644
--- a/src/mongo/db/mongod_options_storage.idl
+++ b/src/mongo/db/mongod_options_storage.idl
@@ -119,18 +119,6 @@ configs:
arg_vartype: Switch
source: [ cli, ini ]
- 'storage.journal.enabled':
- description: 'Enable journaling'
- short_name: journal
- deprecated_short_name: dur
- arg_vartype: Switch
-
- nojournal:
- description: 'Disable journaling (journaling is on by default for 64 bit)'
- deprecated_short_name: nodur
- arg_vartype: Switch
- source: [ cli, ini ]
-
'storage.oplogMinRetentionHours':
description: 'Minimum number of hours to preserve in the oplog. Default is 0 (turned off). Fractions are allowed (e.g. 1.5 hours)'
short_name: oplogMinRetentionHours
diff --git a/src/mongo/db/repl/oplog_applier_impl.cpp b/src/mongo/db/repl/oplog_applier_impl.cpp
index d897241134b..5eed8504f83 100644
--- a/src/mongo/db/repl/oplog_applier_impl.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl.cpp
@@ -262,9 +262,9 @@ void OplogApplierImpl::_run(OplogBuffer* oplogBuffer) {
invariant(!_replCoord->getMemberState().arbiter());
std::unique_ptr<ApplyBatchFinalizer> finalizer{
- getGlobalServiceContext()->getStorageEngine()->isDurable()
- ? new ApplyBatchFinalizerForJournal(_replCoord)
- : new ApplyBatchFinalizer(_replCoord)};
+ getGlobalServiceContext()->getStorageEngine()->isEphemeral()
+ ? new ApplyBatchFinalizer(_replCoord)
+ : new ApplyBatchFinalizerForJournal(_replCoord)};
while (true) { // Exits on message from OplogBatcher.
// Use a new operation context each iteration, as otherwise we may appear to use a single
diff --git a/src/mongo/db/repl/replication_consistency_markers_impl.cpp b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
index c831d6b31da..dbaddbe09cc 100644
--- a/src/mongo/db/repl/replication_consistency_markers_impl.cpp
+++ b/src/mongo/db/repl/replication_consistency_markers_impl.cpp
@@ -170,7 +170,7 @@ void ReplicationConsistencyMarkersImpl::clearInitialSyncFlag(OperationContext* o
"Clearing the truncate point while primary is unsafe: it is asynchronously updated.");
setOplogTruncateAfterPoint(opCtx, Timestamp());
- if (getGlobalServiceContext()->getStorageEngine()->isDurable()) {
+ if (!getGlobalServiceContext()->getStorageEngine()->isEphemeral()) {
JournalFlusher::get(opCtx)->waitForJournalFlush();
replCoord->setMyLastDurableOpTimeAndWallTime(opTimeAndWallTime);
}
diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp
index 9e44ce2d034..0ea77cb39bc 100644
--- a/src/mongo/db/repl/replication_coordinator_impl.cpp
+++ b/src/mongo/db/repl/replication_coordinator_impl.cpp
@@ -4892,7 +4892,7 @@ ReplicationCoordinatorImpl::_setCurrentRSConfig(WithLock lk,
}
}
- // Warn if using the in-memory (ephemeral) storage engine or running running --nojournal with
+ // Warn if using the in-memory (ephemeral) storage engine with
// writeConcernMajorityJournalDefault=true.
StorageEngine* storageEngine = opCtx->getServiceContext()->getStorageEngine();
if (storageEngine && newConfig.getWriteConcernMajorityShouldJournal() &&
@@ -4925,33 +4925,6 @@ ReplicationCoordinatorImpl::_setCurrentRSConfig(WithLock lk,
{logv2::LogTag::kStartupWarnings},
"** available free RAM is exhausted.");
LOGV2_OPTIONS(21386, {logv2::LogTag::kStartupWarnings}, "");
- } else if (!storageEngine->isDurable()) {
- LOGV2_OPTIONS(21369, {logv2::LogTag::kStartupWarnings}, "");
- LOGV2_OPTIONS(
- 21370,
- {logv2::LogTag::kStartupWarnings},
- "** WARNING: This replica set node is running without journaling enabled but the ");
- LOGV2_OPTIONS(
- 21371,
- {logv2::LogTag::kStartupWarnings},
- "** writeConcernMajorityJournalDefault option to the replica set config ");
- LOGV2_OPTIONS(21372,
- {logv2::LogTag::kStartupWarnings},
- "** is set to true. The writeConcernMajorityJournalDefault ");
- LOGV2_OPTIONS(21373,
- {logv2::LogTag::kStartupWarnings},
- "** option to the replica set config must be set to false ");
- LOGV2_OPTIONS(21374,
- {logv2::LogTag::kStartupWarnings},
- "** or w:majority write concerns will never complete.");
- LOGV2_OPTIONS(
- 21375,
- {logv2::LogTag::kStartupWarnings},
- "** In addition, this node's memory consumption may increase until all");
- LOGV2_OPTIONS(21376,
- {logv2::LogTag::kStartupWarnings},
- "** available free RAM is exhausted.");
- LOGV2_OPTIONS(21377, {logv2::LogTag::kStartupWarnings}, "");
}
}
diff --git a/src/mongo/db/startup_warnings_mongod.cpp b/src/mongo/db/startup_warnings_mongod.cpp
index d1973e06866..fcd20f10430 100644
--- a/src/mongo/db/startup_warnings_mongod.cpp
+++ b/src/mongo/db/startup_warnings_mongod.cpp
@@ -146,12 +146,7 @@ void logMongodStartupWarnings(const StorageGlobalParams& storageParams,
22152,
{logv2::LogTag::kStartupWarnings},
"This is a 32 bit MongoDB binary. 32 bit builds are limited to less than 2GB "
- "of data (or less with --journal). See http://dochub.mongodb.org/core/32bit");
- if (!storageParams.dur) {
- LOGV2_WARNING_OPTIONS(22154,
- {logv2::LogTag::kStartupWarnings},
- "Journaling defaults to off for 32 bit and is currently off");
- }
+ "of data. See http://dochub.mongodb.org/core/32bit");
}
#ifdef __linux__
@@ -218,18 +213,15 @@ void logMongodStartupWarnings(const StorageGlobalParams& storageParams,
}
}
- if (storageParams.dur) {
- std::fstream f("/proc/sys/vm/overcommit_memory", ios_base::in);
- unsigned val;
- f >> val;
+ std::fstream f("/proc/sys/vm/overcommit_memory", ios_base::in);
+ unsigned val;
+ f >> val;
- if (val == 2) {
- LOGV2_OPTIONS(
- 22171,
- {logv2::LogTag::kStartupWarnings},
- "Journaling works best if /proc/sys/vm/overcommit_memory is set to 0 or 1",
- "currentValue"_attr = val);
- }
+ if (val == 2) {
+ LOGV2_OPTIONS(22171,
+ {logv2::LogTag::kStartupWarnings},
+ "Journaling works best if /proc/sys/vm/overcommit_memory is set to 0 or 1",
+ "currentValue"_attr = val);
}
if (boost::filesystem::exists("/proc/sys/vm/zone_reclaim_mode")) {
diff --git a/src/mongo/db/storage/control/storage_control.cpp b/src/mongo/db/storage/control/storage_control.cpp
index c5a0706f3da..fc5534bec69 100644
--- a/src/mongo/db/storage/control/storage_control.cpp
+++ b/src/mongo/db/storage/control/storage_control.cpp
@@ -70,17 +70,13 @@ void startStorageControls(ServiceContext* serviceContext, bool forTestOnly) {
// Ephemeral engines are not durable -- waitUntilDurable() returns early -- but frequent updates
// to replication's JournalListener in the waitUntilDurable() code may help update replication
// timestamps more quickly.
- //
- // (Note: the ephemeral engine returns false for isDurable(), so we must be careful not to
- // disable it.)
if (journalFlusherPaused) {
// This is a restart and the JournalListener was paused. Resume the existing JournalFlusher.
JournalFlusher::get(serviceContext)->resume();
journalFlusherPaused = false;
} else {
std::unique_ptr<JournalFlusher> journalFlusher = std::make_unique<JournalFlusher>(
- /*disablePeriodicFlushes*/ forTestOnly ||
- (!storageEngine->isDurable() && !storageEngine->isEphemeral()));
+ /*disablePeriodicFlushes*/ forTestOnly);
journalFlusher->go();
JournalFlusher::set(serviceContext, std::move(journalFlusher));
}
diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.h b/src/mongo/db/storage/devnull/devnull_kv_engine.h
index 1b7941eca4b..f0a2dddfa14 100644
--- a/src/mongo/db/storage/devnull/devnull_kv_engine.h
+++ b/src/mongo/db/storage/devnull/devnull_kv_engine.h
@@ -116,13 +116,6 @@ public:
return false;
}
- /**
- * devnull does no journaling, so don't report the engine as durable.
- */
- virtual bool isDurable() const {
- return false;
- }
-
virtual bool isEphemeral() const {
return true;
}
diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper_test.cpp b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper_test.cpp
index 46298a87a78..47797efdf48 100644
--- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper_test.cpp
+++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper_test.cpp
@@ -115,9 +115,7 @@ public:
Status repairIdent(OperationContext* opCtx, StringData ident) override {
return Status::OK();
}
- bool isDurable() const override {
- return false;
- }
+
bool isEphemeral() const override {
return false;
}
diff --git a/src/mongo/db/storage/kv/kv_engine.h b/src/mongo/db/storage/kv/kv_engine.h
index 01bb7560151..bca7f550d94 100644
--- a/src/mongo/db/storage/kv/kv_engine.h
+++ b/src/mongo/db/storage/kv/kv_engine.h
@@ -241,8 +241,6 @@ public:
virtual void checkpoint() {}
- virtual bool isDurable() const = 0;
-
/**
* Returns true if the KVEngine is ephemeral -- that is, it is NOT persistent and all data is
* lost after shutdown. Otherwise, returns false.
diff --git a/src/mongo/db/storage/recovery_unit.h b/src/mongo/db/storage/recovery_unit.h
index 04101ad66dc..69c36f320a3 100644
--- a/src/mongo/db/storage/recovery_unit.h
+++ b/src/mongo/db/storage/recovery_unit.h
@@ -203,7 +203,7 @@ public:
/**
* Waits until all commits that happened before this call are durable in the journal. Returns
* true, unless the storage engine cannot guarantee durability, which should never happen when
- * isDurable() returned true. This cannot be called from inside a unit of work, and should
+ * the engine is non-ephemeral. This cannot be called from inside a unit of work, and should
* fail if it is. This method invariants if the caller holds any locks, except for repair.
*
* Can throw write interruption errors from the JournalListener.
diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h
index 03be00aa626..1fe8ed0ee39 100644
--- a/src/mongo/db/storage/storage_engine.h
+++ b/src/mongo/db/storage/storage_engine.h
@@ -207,11 +207,6 @@ public:
virtual bool supportsCappedCollections() const = 0;
/**
- * Returns whether the engine supports a journalling concept or not.
- */
- virtual bool isDurable() const = 0;
-
- /**
* Returns true if the engine does not persist data to disk; false otherwise.
*/
virtual bool isEphemeral() const = 0;
diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp
index 3465e9addf4..664a90219bc 100644
--- a/src/mongo/db/storage/storage_engine_impl.cpp
+++ b/src/mongo/db/storage/storage_engine_impl.cpp
@@ -945,10 +945,6 @@ StatusWith<std::deque<std::string>> StorageEngineImpl::extendBackupCursor(Operat
return _engine->extendBackupCursor(opCtx);
}
-bool StorageEngineImpl::isDurable() const {
- return _engine->isDurable();
-}
-
bool StorageEngineImpl::isEphemeral() const {
return _engine->isEphemeral();
}
diff --git a/src/mongo/db/storage/storage_engine_impl.h b/src/mongo/db/storage/storage_engine_impl.h
index 6d9e308402a..d38021ab2d0 100644
--- a/src/mongo/db/storage/storage_engine_impl.h
+++ b/src/mongo/db/storage/storage_engine_impl.h
@@ -101,8 +101,6 @@ public:
virtual StatusWith<std::deque<std::string>> extendBackupCursor(
OperationContext* opCtx) override;
- virtual bool isDurable() const override;
-
virtual bool isEphemeral() const override;
virtual Status repairRecordStore(OperationContext* opCtx,
diff --git a/src/mongo/db/storage/storage_engine_mock.h b/src/mongo/db/storage/storage_engine_mock.h
index 0591fbca9ad..a7a7078384a 100644
--- a/src/mongo/db/storage/storage_engine_mock.h
+++ b/src/mongo/db/storage/storage_engine_mock.h
@@ -47,9 +47,7 @@ public:
bool supportsCappedCollections() const final {
return true;
}
- bool isDurable() const final {
- return false;
- }
+
bool isEphemeral() const final {
return true;
}
diff --git a/src/mongo/db/storage/storage_options.cpp b/src/mongo/db/storage/storage_options.cpp
index ca2d9de84dc..3736cd69316 100644
--- a/src/mongo/db/storage/storage_options.cpp
+++ b/src/mongo/db/storage/storage_options.cpp
@@ -49,9 +49,6 @@ void StorageGlobalParams::reset() {
repair = false;
restore = false;
- // The intention here is to enable the journal by default if we are running on a 64 bit system.
- dur = (sizeof(void*) == 8);
-
noTableScan.store(false);
directoryperdb = false;
syncdelay = 60.0;
diff --git a/src/mongo/db/storage/storage_options.h b/src/mongo/db/storage/storage_options.h
index 108ae66629c..c10b71a25aa 100644
--- a/src/mongo/db/storage/storage_options.h
+++ b/src/mongo/db/storage/storage_options.h
@@ -80,8 +80,6 @@ struct StorageGlobalParams {
// entries for collections not restored and more.
bool restore;
- bool dur; // --dur durability (now --journal)
-
// Whether the Storage Engine selected should be ephemeral in nature or not.
bool ephemeral = false;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp
index 6277a27f62e..63eebef295f 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp
@@ -125,7 +125,6 @@ public:
wiredTigerGlobalOptions.engineConfig,
cacheMB,
wiredTigerGlobalOptions.getMaxHistoryFileSizeMB(),
- params.dur,
params.ephemeral,
params.repair);
kv->setRecordStoreExtraOptions(wiredTigerGlobalOptions.collectionConfig);
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
index 419ac09ffa5..472e1225d5a 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp
@@ -304,7 +304,6 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
const std::string& extraOpenOptions,
size_t cacheSizeMB,
size_t maxHistoryFileSizeMB,
- bool durable,
bool ephemeral,
bool repair)
: _clockSource(cs),
@@ -312,14 +311,13 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
_canonicalName(canonicalName),
_path(path),
_sizeStorerSyncTracker(cs, 100000, Seconds(60)),
- _durable(durable),
_ephemeral(ephemeral),
_inRepairMode(repair),
_keepDataHistory(serverGlobalParams.enableMajorityReadConcern) {
_pinnedOplogTimestamp.store(Timestamp::max().asULL());
boost::filesystem::path journalPath = path;
journalPath /= "journal";
- if (_durable) {
+ if (!_ephemeral) {
if (!boost::filesystem::exists(journalPath)) {
try {
boost::filesystem::create_directory(journalPath);
@@ -356,13 +354,19 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
ss << "cache_cursors=false,";
}
- // The setting may have a later setting override it if not using the journal. We make it
- // unconditional here because even nojournal may need this setting if it is a transition
- // from using the journal.
- ss << "log=(enabled=true,remove=true,path=journal,compressor=";
- ss << wiredTigerGlobalOptions.journalCompressor << "),";
- ss << "builtin_extension_config=(zstd=(compression_level="
- << wiredTigerGlobalOptions.zstdCompressorLevel << ")),";
+ if (_ephemeral) {
+ // If we've requested an ephemeral instance we store everything into memory instead of
+ // backing it onto disk. Logging is not supported in this instance, thus we also have to
+ // disable it.
+ ss << ",in_memory=true,log=(enabled=false),";
+ } else {
+ // In persistent mode we enable the journal and set the compression settings.
+ ss << "log=(enabled=true,remove=true,path=journal,compressor=";
+ ss << wiredTigerGlobalOptions.journalCompressor << "),";
+ ss << "builtin_extension_config=(zstd=(compression_level="
+ << wiredTigerGlobalOptions.zstdCompressorLevel << ")),";
+ }
+
ss << "file_manager=(close_idle_time=" << gWiredTigerFileHandleCloseIdleTime
<< ",close_scan_interval=" << gWiredTigerFileHandleCloseScanInterval
<< ",close_handle_minimum=" << gWiredTigerFileHandleCloseMinimum << "),";
@@ -423,60 +427,10 @@ WiredTigerKVEngine::WiredTigerKVEngine(const std::string& canonicalName,
ss << WiredTigerExtensions::get(getGlobalServiceContext())->getOpenExtensionsConfig();
ss << extraOpenOptions;
- if (!_durable) {
- // If we started without the journal, but previously used the journal then open with the
- // WT log enabled to perform any unclean shutdown recovery and then close and reopen in
- // the normal path without the journal.
- if (boost::filesystem::exists(journalPath)) {
- string config = ss.str();
- auto start = Date_t::now();
- LOGV2(22313,
- "Detected WT journal files. Running recovery from last checkpoint. journal to "
- "nojournal transition config",
- "config"_attr = config);
- int ret = wiredtiger_open(
- path.c_str(), _eventHandler.getWtEventHandler(), config.c_str(), &_conn);
- LOGV2(4795911, "Recovery complete", "duration"_attr = Date_t::now() - start);
- if (ret == EINVAL) {
- fassertFailedNoTrace(28717);
- } else if (ret != 0) {
- Status s(wtRCToStatus(ret, nullptr));
- msgasserted(28718, s.reason());
- }
- start = Date_t::now();
- invariantWTOK(_conn->close(_conn, nullptr), nullptr);
- LOGV2(4795910,
- "WiredTiger closed. Removing journal files",
- "duration"_attr = Date_t::now() - start);
- // After successful recovery, remove the journal directory.
- try {
- start = Date_t::now();
- boost::filesystem::remove_all(journalPath);
- } catch (std::exception& e) {
- LOGV2_ERROR(22355,
- "error removing journal dir {directory} {error}",
- "Error removing journal directory",
- "directory"_attr = journalPath.generic_string(),
- "error"_attr = e.what(),
- "duration"_attr = Date_t::now() - start);
- throw;
- }
- LOGV2(4795908, "Journal files removed", "duration"_attr = Date_t::now() - start);
- }
- // This setting overrides the earlier setting because it is later in the config string.
- ss << ",log=(enabled=false),";
- }
-
if (WiredTigerUtil::willRestoreFromBackup()) {
ss << WiredTigerUtil::generateRestoreConfig() << ",";
}
- // If we've requested an ephemeral instance we store everything into memory instead of backing
- // it onto disk. Logging is not supported in this instance, thus we also have to disable it.
- if (_ephemeral) {
- ss << "in_memory=true,log=(enabled=false),";
- }
-
string config = ss.str();
LOGV2(22315, "Opening WiredTiger", "config"_attr = config);
auto startTime = Date_t::now();
@@ -957,8 +911,8 @@ void WiredTigerKVEngine::flushAllFiles(OperationContext* opCtx, bool callerHolds
// operations such as backup, it's imperative that we copy the most up-to-date data files.
syncSizeInfo(true);
- // If there's no journal, we must checkpoint all of the data.
- WiredTigerSessionCache::Fsync fsyncType = _durable
+ // If there's no journal (ephemeral), we must checkpoint all of the data.
+ WiredTigerSessionCache::Fsync fsyncType = !_ephemeral
? WiredTigerSessionCache::Fsync::kCheckpointStableTimestamp
: WiredTigerSessionCache::Fsync::kCheckpointAll;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
index e0db1f3a387..748ec6c4c1d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h
@@ -107,7 +107,6 @@ public:
const std::string& extraOpenOptions,
size_t cacheSizeMB,
size_t maxHistoryFileSizeMB,
- bool durable,
bool ephemeral,
bool repair);
@@ -122,10 +121,6 @@ public:
void checkpoint() override;
- bool isDurable() const override {
- return _durable;
- }
-
bool isEphemeral() const override {
return _ephemeral;
}
@@ -483,8 +478,6 @@ private:
std::unique_ptr<WiredTigerSizeStorer> _sizeStorer;
std::string _sizeStorerUri;
mutable ElapsedTracker _sizeStorerSyncTracker;
-
- bool _durable;
bool _ephemeral; // whether we are using the in-memory mode of the WT engine
const bool _inRepairMode;
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
index e505a961112..ff3350eea0d 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp
@@ -100,7 +100,6 @@ private:
extraStrings,
1,
0,
- true,
false,
_forRepair);
}
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp
index 79737d7ab4e..df5258c462f 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp
@@ -49,7 +49,6 @@ WiredTigerHarnessHelper::WiredTigerHarnessHelper(Options options, StringData ext
_testLoggingSettings(extraStrings.toString()),
1,
0,
- true,
false,
false) {
repl::ReplicationCoordinator::set(
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
index 6b5294edf39..ee6609e710b 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp
@@ -58,7 +58,6 @@ public:
"", // .extraOpenOptions
1, // .cacheSizeMB
0, // .maxCacheOverflowFileSizeMB
- false, // .durable
false, // .ephemeral
false // .repair
) {
@@ -176,7 +175,7 @@ public:
private:
const char* wt_uri = "table:prepare_transaction";
- const char* wt_config = "key_format=S,value_format=S";
+ const char* wt_config = "key_format=S,value_format=S,log=(enabled=false)";
};
TEST_F(WiredTigerRecoveryUnitTestFixture, SetReadSource) {
diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
index 023a4f0abf7..e1db19aa708 100644
--- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
+++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp
@@ -282,13 +282,13 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx,
// WT_CONNECTION, i.e: replication is on) requires `forceCheckpoint` to be true and journaling
// to be enabled.
if (syncType == Fsync::kCheckpointStableTimestamp && getGlobalReplSettings().usingReplSets()) {
- invariant(_engine->isDurable());
+ invariant(!isEphemeral());
}
// When forcing a checkpoint with journaling enabled, don't synchronize with other
// waiters, as a log flush is much cheaper than a full checkpoint.
if ((syncType == Fsync::kCheckpointStableTimestamp || syncType == Fsync::kCheckpointAll) &&
- _engine->isDurable()) {
+ !isEphemeral()) {
UniqueWiredTigerSession session = getSession();
WT_SESSION* s = session->getSession();
{
@@ -357,7 +357,7 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx,
}
// Use the journal when available, or a checkpoint otherwise.
- if (_engine && _engine->isDurable()) {
+ if (!isEphemeral()) {
invariantWTOK(_waitUntilDurableSession->log_flush(_waitUntilDurableSession, "sync=on"),
_waitUntilDurableSession);
LOGV2_DEBUG(22419, 4, "flushed journal");
diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp
index a0255dc5aa6..1d40287112f 100644
--- a/src/mongo/db/write_concern.cpp
+++ b/src/mongo/db/write_concern.cpp
@@ -167,7 +167,7 @@ StatusWith<WriteConcernOptions> extractWriteConcern(OperationContext* opCtx,
Status validateWriteConcern(OperationContext* opCtx, const WriteConcernOptions& writeConcern) {
if (writeConcern.syncMode == WriteConcernOptions::SyncMode::JOURNAL &&
- !opCtx->getServiceContext()->getStorageEngine()->isDurable()) {
+ opCtx->getServiceContext()->getStorageEngine()->isEphemeral()) {
return Status(ErrorCodes::BadValue,
"cannot use 'j' option when a host does not have journaling enabled");
}
@@ -291,9 +291,7 @@ Status waitForWriteConcern(OperationContext* opCtx,
break;
case WriteConcernOptions::SyncMode::FSYNC: {
waitForNoOplogHolesIfNeeded(opCtx);
- if (!storageEngine->isDurable()) {
- storageEngine->flushAllFiles(opCtx, /*callerHoldsReadLock*/ false);
-
+ if (!storageEngine->isEphemeral()) {
// This field has had a dummy value since MMAP went away. It is undocumented.
// Maintaining it so as not to cause unnecessary user pain across upgrades.
result->fsyncFiles = 1;
diff --git a/src/mongo/dbtests/framework_options.idl b/src/mongo/dbtests/framework_options.idl
index e84811a5f71..94f170f83aa 100644
--- a/src/mongo/dbtests/framework_options.idl
+++ b/src/mongo/dbtests/framework_options.idl
@@ -62,12 +62,6 @@ configs:
description: Verbose
arg_vartype: Switch
single_name: v
- dur:
- description: 'Enable journaling (currently the default)'
- arg_vartype: Switch
- nodur:
- description: 'Disable journaling'
- arg_vartype: Switch
seed:
description: 'Random number seed'
arg_vartype: UnsignedLongLong
diff --git a/src/mongo/embedded/embedded_options.h b/src/mongo/embedded/embedded_options.h
index 0c188efcd6d..efd05e93095 100644
--- a/src/mongo/embedded/embedded_options.h
+++ b/src/mongo/embedded/embedded_options.h
@@ -41,8 +41,8 @@ Status addOptions(optionenvironment::OptionSection* options);
/**
* Canonicalize options for the given environment.
*
- * For example, the options "dur", "nodur", "journal", "nojournal", and
- * "storage.journaling.enabled" should all be merged into "storage.journaling.enabled".
+ * For example, the "--verbose" string argument and the "-vvvv" argument.
+ *
*/
Status canonicalizeOptions(optionenvironment::Environment* params);
diff --git a/src/mongo/s/mongos_options.h b/src/mongo/s/mongos_options.h
index a827d04550d..538f82bd965 100644
--- a/src/mongo/s/mongos_options.h
+++ b/src/mongo/s/mongos_options.h
@@ -75,8 +75,8 @@ Status validateMongosOptions(const moe::Environment& params);
/**
* Canonicalize mongos options for the given environment.
*
- * For example, the options "dur", "nodur", "journal", "nojournal", and
- * "storage.journaling.enabled" should all be merged into "storage.journaling.enabled".
+ * For example, the options "noscripting" and "security.javascriptEnabled" should both be merged
+ * into "security.javascriptEnabled".
*/
Status canonicalizeMongosOptions(moe::Environment* params);
diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js
index 3b763b6ec26..5dd402a89d7 100644
--- a/src/mongo/shell/replsettest.js
+++ b/src/mongo/shell/replsettest.js
@@ -231,24 +231,14 @@ var ReplSetTest = function(opts) {
*/
function _isRunningWithoutJournaling(conn) {
var result = asCluster(conn, function() {
+ // Persistent storage engines (WT) can only run with journal enabled.
var serverStatus = assert.commandWorked(conn.adminCommand({serverStatus: 1}));
if (serverStatus.storageEngine.hasOwnProperty('persistent')) {
- if (!serverStatus.storageEngine.persistent) {
- return true;
+ if (serverStatus.storageEngine.persistent) {
+ return false;
}
- } else if (serverStatus.storageEngine.name == 'inMemory') {
- return true;
}
- var cmdLineOpts = assert.commandWorked(conn.adminCommand({getCmdLineOpts: 1}));
- var getWithDefault = function(dict, key, dflt) {
- if (dict[key] === undefined)
- return dflt;
- return dict[key];
- };
- return !getWithDefault(
- getWithDefault(getWithDefault(cmdLineOpts.parsed, "storage", {}), "journal", {}),
- "enabled",
- true);
+ return true;
});
return result;
}
diff --git a/src/mongo/shell/servers.js b/src/mongo/shell/servers.js
index c48527ba68f..c31bc887c69 100644
--- a/src/mongo/shell/servers.js
+++ b/src/mongo/shell/servers.js
@@ -331,7 +331,6 @@ MongoRunner.logicalOptions = {
noReplSet: true,
forgetPort: true,
arbiter: true,
- noJournal: true,
binVersion: true,
waitForConnect: true,
bridgeOptions: true,
@@ -666,7 +665,6 @@ var _removeSetParameterIfBeforeVersion = function(
* useLogFiles {boolean}: use with logFile option.
* logFile {string}: path to the log file. If not specified and useLogFiles
* is true, automatically creates a log file inside dbpath.
- * noJournal {boolean}
* keyFile
* replSet
* oplogSize
@@ -716,11 +714,6 @@ MongoRunner.mongodOptions = function(opts = {}) {
opts.logpath = opts.logFile;
}
- if ((jsTestOptions().noJournal || opts.noJournal) && !('journal' in opts) &&
- !('configsvr' in opts)) {
- opts.nojournal = "";
- }
-
if (jsTestOptions().keyFile && !opts.keyFile) {
opts.keyFile = jsTestOptions().keyFile;
}
diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js
index 08ffea17f3c..465ee647492 100644
--- a/src/mongo/shell/shardingtest.js
+++ b/src/mongo/shell/shardingtest.js
@@ -1090,7 +1090,6 @@ var ShardingTest = function(params) {
// Allow specifying mixed-type options like this:
// { mongos : [ { bind_ip : "localhost" } ],
- // config : [ { nojournal : "" } ],
// shards : { rs : true, d : true } }
if (Array.isArray(numShards)) {
for (var i = 0; i < numShards.length; i++) {
@@ -1328,7 +1327,6 @@ var ShardingTest = function(params) {
var startOptions = {
pathOpts: pathOpts,
// Ensure that journaling is always enabled for config servers.
- journal: "",
configsvr: "",
storageEngine: "wiredTiger",
};
diff --git a/src/mongo/shell/utils.js b/src/mongo/shell/utils.js
index 81c40af4820..d7b01ba7c5b 100644
--- a/src/mongo/shell/utils.js
+++ b/src/mongo/shell/utils.js
@@ -328,7 +328,6 @@ jsTestOptions = function() {
wiredTigerEngineConfigString: TestData.wiredTigerEngineConfigString,
wiredTigerCollectionConfigString: TestData.wiredTigerCollectionConfigString,
wiredTigerIndexConfigString: TestData.wiredTigerIndexConfigString,
- noJournal: TestData.noJournal,
auth: TestData.auth,
// Note: keyFile is also used as a flag to indicate cluster auth is turned on, set it
// to a truthy value if you'd like to do cluster auth, even if it's not keyFile auth.
diff --git a/src/mongo/watchdog/watchdog_mongod.cpp b/src/mongo/watchdog/watchdog_mongod.cpp
index 617ec245bc6..9b423069b87 100644
--- a/src/mongo/watchdog/watchdog_mongod.cpp
+++ b/src/mongo/watchdog/watchdog_mongod.cpp
@@ -156,22 +156,19 @@ void startWatchdog(ServiceContext* service) {
checks.push_back(std::move(dataCheck));
- // Add a check for the journal if it is not disabled
- if (storageGlobalParams.dur) {
- auto journalDirectory = boost::filesystem::path(storageGlobalParams.dbpath);
- journalDirectory /= "journal";
-
- if (boost::filesystem::exists(journalDirectory)) {
- auto journalCheck = std::make_unique<DirectoryCheck>(journalDirectory);
-
- checks.push_back(std::move(journalCheck));
- } else {
- LOGV2_WARNING(23835,
- "Watchdog is skipping check for journal directory since it does not "
- "exist: '{journalDirectory_generic_string}'",
- "journalDirectory_generic_string"_attr =
- journalDirectory.generic_string());
- }
+ // Check for the journal.
+ auto journalDirectory = boost::filesystem::path(storageGlobalParams.dbpath);
+ journalDirectory /= "journal";
+
+ if (boost::filesystem::exists(journalDirectory)) {
+ auto journalCheck = std::make_unique<DirectoryCheck>(journalDirectory);
+
+ checks.push_back(std::move(journalCheck));
+ } else {
+ LOGV2_WARNING(23835,
+ "Watchdog is skipping check for journal directory since it does not "
+ "exist: '{journalDirectory_generic_string}'",
+ "journalDirectory_generic_string"_attr = journalDirectory.generic_string());
}
// If the user specified a log path, also monitor that directory.