author     Etienne Petrel <etienne.petrel@mongodb.com>  2022-03-15 05:15:28 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2022-03-15 05:45:32 +0000
commit     b5148d29b4c4dd48c21233bcf387b2fa52ee1902 (patch)
tree       d082c71cf68f48737c8a36bddcb13ee9bb4ff95b
parent     8a754c04beadb811e089c095d16d9c75a448db91 (diff)
download   mongo-b5148d29b4c4dd48c21233bcf387b2fa52ee1902.tar.gz
Import wiredtiger: e28c2c63bd2448d26047d3531ea59905b57668b6 from branch mongodb-master
ref: fb7a60b648..e28c2c63bd for: 6.0.0 WT-8934 WiredTiger standalone documentation review
-rw-r--r--  src/third_party/wiredtiger/dist/api_data.py | 6
-rwxr-xr-x  src/third_party/wiredtiger/dist/s_docs | 15
-rw-r--r--  src/third_party/wiredtiger/import.data | 2
-rw-r--r--  src/third_party/wiredtiger/src/docs/arch-backup.dox | 2
-rw-r--r--  src/third_party/wiredtiger/src/docs/arch-log-file.dox | 2
-rw-r--r--  src/third_party/wiredtiger/src/docs/backup.dox | 23
-rw-r--r--  src/third_party/wiredtiger/src/docs/durability-checkpoint.dox (renamed from src/third_party/wiredtiger/src/docs/checkpoint.dox) | 43
-rw-r--r--  src/third_party/wiredtiger/src/docs/durability-log.dox (renamed from src/third_party/wiredtiger/src/docs/durability.dox) | 77
-rw-r--r--  src/third_party/wiredtiger/src/docs/durability-overview.dox (renamed from src/third_party/wiredtiger/src/docs/explain-durability.dox) | 2
-rw-r--r--  src/third_party/wiredtiger/src/docs/explain-acid.dox | 10
-rw-r--r--  src/third_party/wiredtiger/src/docs/programming.dox | 6
-rw-r--r--  src/third_party/wiredtiger/src/docs/timestamp-global.dox | 53
-rw-r--r--  src/third_party/wiredtiger/src/docs/timestamp-misc.dox | 26
-rw-r--r--  src/third_party/wiredtiger/src/docs/timestamp-model.dox | 2
-rw-r--r--  src/third_party/wiredtiger/src/docs/timestamp-prepare-roundup.dox | 109
-rw-r--r--  src/third_party/wiredtiger/src/docs/timestamp-prepare.dox | 9
-rw-r--r--  src/third_party/wiredtiger/src/docs/timestamp-txn.dox | 39
-rw-r--r--  src/third_party/wiredtiger/src/docs/transactions.dox | 30
-rw-r--r--  src/third_party/wiredtiger/src/docs/transactions_api.dox | 10
-rw-r--r--  src/third_party/wiredtiger/src/docs/tune-cache.dox | 2
-rw-r--r--  src/third_party/wiredtiger/src/docs/tune-durability.dox | 16
-rw-r--r--  src/third_party/wiredtiger/src/docs/verbose-messaging.dox | 2
-rw-r--r--  src/third_party/wiredtiger/src/include/wiredtiger.in | 12
23 files changed, 257 insertions, 241 deletions
diff --git a/src/third_party/wiredtiger/dist/api_data.py b/src/third_party/wiredtiger/dist/api_data.py
index 1c17380a6b5..d6b8279bc37 100644
--- a/src/third_party/wiredtiger/dist/api_data.py
+++ b/src/third_party/wiredtiger/dist/api_data.py
@@ -1877,8 +1877,8 @@ methods = {
type='list'),
Config('use_timestamp', 'true', r'''
if true (the default), create the checkpoint as of the last stable timestamp if timestamps
- are in use, or all current updates if there is no stable timestamp set. If false, this
- option generates a checkpoint with all updates including those later than the timestamp''',
+ are in use, or with all committed updates if there is no stable timestamp set. If false,
+ always generate a checkpoint with all committed updates, ignoring any stable timestamp''',
type='boolean'),
]),
@@ -1982,7 +1982,7 @@ methods = {
stable timestamp. See @ref timestamp_global_api'''),
Config('stable_timestamp', '', r'''
checkpoints will not include commits that are newer than the specified
- timestamp in tables configured with \c log=(enabled=false).
+ timestamp in tables configured with \c "log=(enabled=false)".
Values must be monotonically increasing, any attempt to set the value to
older than the current is silently ignored. The value must
not be older than the current oldest timestamp. See
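The checkpoint and timestamp settings described in this hunk are driven from the C API; a minimal sketch (not part of the imported change, with a placeholder home directory and arbitrary hexadecimal timestamp) might look like:

    #include <stdlib.h>
    #include <wiredtiger.h>

    int
    main(void)
    {
        WT_CONNECTION *conn;
        WT_SESSION *session;

        if (wiredtiger_open("WT_HOME", NULL, "create", &conn) != 0)
            return (EXIT_FAILURE);
        (void)conn->open_session(conn, NULL, NULL, &session);

        /* Timestamps are given as hexadecimal strings: 2a == 42. */
        (void)conn->set_timestamp(conn, "stable_timestamp=2a");

        /* Default checkpoint: stops at the stable timestamp when one is set. */
        (void)session->checkpoint(session, NULL);

        /* Checkpoint all committed updates, ignoring any stable timestamp. */
        (void)session->checkpoint(session, "use_timestamp=false");

        return (conn->close(conn, NULL) == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
    }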
diff --git a/src/third_party/wiredtiger/dist/s_docs b/src/third_party/wiredtiger/dist/s_docs
index 0f74d850711..618f0d04863 100755
--- a/src/third_party/wiredtiger/dist/s_docs
+++ b/src/third_party/wiredtiger/dist/s_docs
@@ -122,11 +122,24 @@ valid_build()
echo 'Unreferenced page: see docs/pages.html for the list.'
e=1
}
+
classf=`ls ../docs/struct___* 2>/dev/null`
for c in $classf; do
echo "$c: Add class to PREDEFINED in src/docs/$doxyfile, then remove docs/*.{html,js} and rebuild"
-
done
+
+ # Complain if \c markdown isn't right.
+ grep '=</code>(' ../docs/*.html > $t
+ test -s $t && {
+ echo "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-="
+ echo "Some <code> markup fragments are not correctly quoted, the output switched into"
+ echo "typewrite format and then switched out of typewrite format before the fragment"
+ echo "was complete. Probably a \\\c command that didn't span a parenthesis character."
+ echo
+ cat $t
+ echo "=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-="
+ e=1
+ }
}
check_docs_data()
diff --git a/src/third_party/wiredtiger/import.data b/src/third_party/wiredtiger/import.data
index ce47de73b4b..c32f2fe2c4e 100644
--- a/src/third_party/wiredtiger/import.data
+++ b/src/third_party/wiredtiger/import.data
@@ -2,5 +2,5 @@
"vendor": "wiredtiger",
"github": "wiredtiger/wiredtiger.git",
"branch": "mongodb-master",
- "commit": "fb7a60b6486c8c294c45cdc07fb1c3db86eb7424"
+ "commit": "e28c2c63bd2448d26047d3531ea59905b57668b6"
}
diff --git a/src/third_party/wiredtiger/src/docs/arch-backup.dox b/src/third_party/wiredtiger/src/docs/arch-backup.dox
index 9b939ad4a47..878d128c8ab 100644
--- a/src/third_party/wiredtiger/src/docs/arch-backup.dox
+++ b/src/third_party/wiredtiger/src/docs/arch-backup.dox
@@ -51,7 +51,7 @@ renamed or imported into the database after the incremental backup cursor is ope
When the backup cursor is opened with the \c target configuration string as \c "target=(\"log:\\")"
the log-based incremental backup is performed by adding all the existing log files in the database
to the list of files that needs to be copied. Applications wanting to use log files for incremental
-backup must first disable automatic log file removal using the \c log=(remove=false) configuration
+backup must first disable automatic log file removal using the \c "log=(remove=false)" configuration
to ::wiredtiger_open. By default, WiredTiger automatically removes log files no longer required
for recovery. Refer to @ref backup_incremental for more information on how to use the log-based
incremental backup.
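A minimal sketch of the log-based incremental backup flow described here, assuming the connection was opened with logging enabled and log removal disabled (the copy step is only a placeholder):

    #include <stdio.h>
    #include <wiredtiger.h>

    void
    log_incremental_backup(WT_CONNECTION *conn)
    {
        WT_CURSOR *cursor;
        WT_SESSION *session;
        const char *filename;

        /* The connection must have been opened with log=(enabled,remove=false). */
        (void)conn->open_session(conn, NULL, NULL, &session);

        /* Only the log files are returned with the "log:" target. */
        (void)session->open_cursor(
            session, "backup:", NULL, "target=(\"log:\")", &cursor);
        while (cursor->next(cursor) == 0) {
            (void)cursor->get_key(cursor, &filename);
            printf("copy %s to the backup directory\n", filename);
        }
        (void)cursor->close(cursor);
        (void)session->close(session, NULL);
    }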
diff --git a/src/third_party/wiredtiger/src/docs/arch-log-file.dox b/src/third_party/wiredtiger/src/docs/arch-log-file.dox
index a4d544d595f..b8659a41c34 100644
--- a/src/third_party/wiredtiger/src/docs/arch-log-file.dox
+++ b/src/third_party/wiredtiger/src/docs/arch-log-file.dox
@@ -49,7 +49,7 @@ A hole can be generated in a log file if a buffer with a later LSN is written be
a buffer with an earlier LSN. That can also happen at a log file boundary and that is why
knowing the LSN at the end of the previous log file is critical to recovery.
-The user can choose the maximum log file size via \c log=(file_max=size) configuration
+The user can choose the maximum log file size via \c "log=(file_max=size)" configuration
to the ::wiredtiger_open call. Records written in the log are varying length
depending on the data written. In typical usage, the system will choose to switch
log files before writing a log buffer that exceeds the configured file size. However it
diff --git a/src/third_party/wiredtiger/src/docs/backup.dox b/src/third_party/wiredtiger/src/docs/backup.dox
index 087bbb9273e..fcaac5de705 100644
--- a/src/third_party/wiredtiger/src/docs/backup.dox
+++ b/src/third_party/wiredtiger/src/docs/backup.dox
@@ -1,13 +1,12 @@
/*! @class doc_bulk_durability
-Bulk loads are not commit-level durable, that is, the creation and
-bulk-load of an object will not appear in the database log files.\ For
-this reason, applications doing incremental backups after a full backup
-should repeat the full backup step after doing a bulk load to make the
-bulk load durable.\ In addition, incremental backups after a bulk load
-(without an intervening full backup)
-can cause recovery to report errors because there are log records that
-apply to data files which do not appear in the backup.
+Bulk loads are not commit-level durable, that is, the creation and bulk-load
+of an object will not appear in the database log files. For this reason,
+applications doing incremental backups after a full backup should repeat the
+full backup step after doing a bulk load to make the bulk load appear in
+the backup. In addition, incremental backups after a bulk load (without an
+intervening full backup) can cause recovery to report errors because there
+are log records that apply to data files which do not appear in the backup.
*/
@@ -125,13 +124,13 @@ The following is the procedure for incrementally backing up a database
using block modifications:
1. Perform a full backup of the database (as described above), with the
-additional configuration \c incremental=(enabled=true,this_id="ID1").
+additional configuration <tt>incremental=(enabled=true,this_id="ID1")</tt>.
The identifier specified in \c this_id starts block tracking and that
identifier can be used in the future as the source of an incremental
backup. Identifiers can be any text string, but should be unique.
2. Begin the incremental backup by opening a backup cursor with the
-\c backup: URI and config string of \c incremental=(src_id="ID1",this_id="ID2").
+\c backup: URI and config string of <tt>incremental=(src_id="ID1",this_id="ID2")</tt>.
Call this \c backup_cursor. Like a normal full backup cursor,
this cursor will return the filename as the key. There is no associated
value. The information returned will be based on blocks tracked since the time of
@@ -144,7 +143,7 @@ for details.
3. For each file returned by \c backup_cursor->next(), open a duplicate
backup cursor to do the incremental backup on that file. The list
returned will also include log files (prefixed by \c WiredTigerLog) that need to
-be copied. Configure that duplicate cursor with \c incremental=(file=name).
+be copied. Configure that duplicate cursor with \c "incremental=(file=name)".
The \c name comes from the string returned from \c backup_cursor->get_key().
Call this incr_cursor.
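A sketch of steps 2 and 3 above, using placeholder identifiers ID1/ID2 and omitting error handling and the actual block copying:

    #include <stdio.h>
    #include <wiredtiger.h>

    void
    incremental_backup(WT_SESSION *session)
    {
        WT_CURSOR *backup_cursor, *incr_cursor;
        const char *filename;
        char config[256];

        /* Step 2: open the incremental backup cursor against a prior full
         * backup taken with incremental=(enabled=true,this_id="ID1"). */
        (void)session->open_cursor(session, "backup:", NULL,
            "incremental=(src_id=\"ID1\",this_id=\"ID2\")", &backup_cursor);

        /* Step 3: for each returned file, open a duplicate cursor on it. */
        while (backup_cursor->next(backup_cursor) == 0) {
            (void)backup_cursor->get_key(backup_cursor, &filename);
            (void)snprintf(config, sizeof(config),
                "incremental=(file=%s)", filename);
            (void)session->open_cursor(
                session, NULL, backup_cursor, config, &incr_cursor);
            /* ... iterate incr_cursor to learn which blocks to copy ... */
            (void)incr_cursor->close(incr_cursor);
        }
        (void)backup_cursor->close(backup_cursor);
    }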
@@ -211,7 +210,7 @@ Incremental backups may also save time when the tables are very large.
By default, WiredTiger automatically removes log files no longer
required for recovery. Applications wanting to use log files for
incremental backup must first disable automatic log file removal using
-the \c log=(remove=false) configuration to ::wiredtiger_open.
+the \c "log=(remove=false)" configuration to ::wiredtiger_open.
The following is the procedure for incrementally backing up a database
and removing log files from the original database home:
diff --git a/src/third_party/wiredtiger/src/docs/checkpoint.dox b/src/third_party/wiredtiger/src/docs/durability-checkpoint.dox
index 4b9c9c00350..70d8f92c35f 100644
--- a/src/third_party/wiredtiger/src/docs/checkpoint.dox
+++ b/src/third_party/wiredtiger/src/docs/durability-checkpoint.dox
@@ -1,4 +1,4 @@
-/*! @page checkpoint Checkpoint durability
+/*! @page durability_checkpoint Checkpoint-level durability
WiredTiger supports checkpoint durability by default, and optionally
commit-level durability when logging is enabled. In most applications,
@@ -6,22 +6,22 @@ commit-level durability impacts performance more than checkpoint
durability; checkpoints offer basic operation durability across
application or system failure without impacting performance (although
the creation of each checkpoint is a relatively heavy-weight operation).
-See @ref durability for information on commit-level durability.
+See @ref durability_log for information on commit-level durability.
@section checkpoint_snapshot Checkpoints vs. snapshots
-Since the terms "checkpoint" and "snapshot" are widely used in this manual, a
-quick explanation of the difference: a checkpoint is an on-disk entity that
+Here is a brief explanation of the terms "checkpoint" and "snapshot", as
+they are widely used in this manual. A checkpoint is an on-disk entity that
captures the persistent state of some or all of the database, while a snapshot
is a lightweight in-memory entity that captures the current state of pending
-updates in the cache. Isolation refers to snapshots, because isolation is about
-runtime state and which updates can be seen by other threads' transactions as
-they run. Durability refers to checkpoints, because durability is about on-disk
-persistence. The two concepts are closely connected, of course; when a
-checkpoint is created the code involved uses a snapshot to determine which
-updates should and should not appear in the checkpoint.
+updates in the cache. Isolation refers to snapshots, because isolation is about
+runtime state and which updates can be seen by other threads' transactions
+as they run. Durability refers to checkpoints, because durability is about
+on-disk persistence. The two concepts are closely connected, of course;
+when a checkpoint is created the code involved uses a snapshot to determine
+which updates should and should not appear in the checkpoint.
-@section checkpoints Checkpoints
+@section checkpoint_checkpoints Checkpoints
A checkpoint is automatically created for each individual file whenever the last
reference to a modified data source is closed.
@@ -40,11 +40,11 @@ Data sources that are involved in an exclusive operation when the checkpoint
starts, including bulk load, upgrade or salvage, will be skipped by the
checkpoint.
-When a data source is first opened, it appears in the same state it was in when
-it was most recently checkpointed. In other words, updates after the most recent
-checkpoint will not appear in the data source at checkpoint-level durability.
-If no checkpoint is found when the data source is opened, the data source will
-appear empty.
+When a data source is first opened, it appears in the same state it was
+in when it was most recently checkpointed. In other words, updates after
+the most recent checkpoint (for example, in the case of failure) will not
+appear in the data source at checkpoint-level durability. If no checkpoint
+is found when the data source is opened, the data source will appear empty.
@subsection checkpoint_target Checkpointing specific objects
@@ -109,11 +109,12 @@ data source's checkpoints, with time stamp, in a human-readable format.
Backups are done using backup cursors (see @ref backup for more information).
\warning
-When applications are using checkpoint-level durability, checkpoints taken while
-a backup cursor is open are not durable. That is, if a crash occurs when a
-backup cursor is open, then the system will be restored to the most recent
-checkpoint prior to the opening of the backup cursor, even if later database
-checkpoints were completed.
+When applications are using checkpoint-level durability, checkpoints taken
+while a backup cursor is open are not durable. That is, if a crash occurs
+when a backup cursor is open, then the system will be restored to the most
+recent checkpoint prior to the opening of the backup cursor, even if later
+database checkpoints were completed. As soon as the backup cursor is closed,
+the system will again be restored to the most recent checkpoint taken.
Applications using commit-level durability retain durability via the write-ahead
log even though checkpoints taken while a backup cursor is open are not durable.
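For reference, a small sketch of application-driven checkpoints under checkpoint-level durability; the table and checkpoint names are hypothetical:

    #include <wiredtiger.h>

    void
    take_checkpoints(WT_SESSION *session)
    {
        /* Checkpoint the entire database. */
        (void)session->checkpoint(session, NULL);

        /* Checkpoint a specific object only. */
        (void)session->checkpoint(session, "target=(\"table:mytable\")");

        /* Create (or update) a named checkpoint. */
        (void)session->checkpoint(session, "name=midnight");
    }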
diff --git a/src/third_party/wiredtiger/src/docs/durability.dox b/src/third_party/wiredtiger/src/docs/durability-log.dox
index 914dee827e6..0676880f45d 100644
--- a/src/third_party/wiredtiger/src/docs/durability.dox
+++ b/src/third_party/wiredtiger/src/docs/durability-log.dox
@@ -1,21 +1,31 @@
-/*! @page durability Commit-level durability
+/*! @page durability_log Commit-level durability
The next level of WiredTiger transactional application involves adding
-commit-level durability for data modifications. As described in @ref checkpoint,
-WiredTiger supports checkpoint durability by default. Commit-level durability
-requires additional configuration.
+commit-level durability for data modifications. As described in
+@ref durability_checkpoint, WiredTiger supports checkpoint durability
+by default. Commit-level durability requires additional configuration.
@section commit_durability_enable Enabling commit-level durability
-To enable commit-level durability, pass the \c log=(enabled) configuration
-string to ::wiredtiger_open. This causes WiredTiger to write records into the
-log for each transaction, giving all objects opened in the database commit-level
-durability. The operational transactional API does not otherwise change.
+To enable commit-level durability, pass the <tt>log=(enabled)</tt>
+configuration string to ::wiredtiger_open. This causes WiredTiger to write
+records into the log for each transaction, giving all objects opened in the
+database commit-level durability. The operational transactional API does
+not otherwise change.
+
+\warning
+By default, log records are written to an in-memory buffer before
+WT_SESSION::commit_transaction returns, giving the highest performance but
+not ensuring immediate durability. The database can be configured to flush
+log records to the operating system buffer cache (ensuring durability over
+application failure), or to stable storage (ensuring durability over system
+failure), but that will impact performance.
+See @ref commit_durability_flush_config for more information.
It is possible to enable commit-level durability for some database
objects and not others.
-To do this, one must pass \c log=(enabled) to ::wiredtiger_open and
-then pass \c log=(enabled=false) to WT_SESSION::create for the objects
+To do this, one must pass \c "log=(enabled)" to ::wiredtiger_open and
+then pass \c "log=(enabled=false)" to WT_SESSION::create for the objects
that should continue to use checkpoint durability.
(Doing the converse is not supported, that is,
enabling logging on some tables while leaving the global switch turned
@@ -47,12 +57,12 @@ transaction is committed.
By default, log records are buffered in memory and not flushed to disk
immediately, even when committed; groups of transactions are flushed
together.
-(See @ref durability_group_commit.)
+(See @ref commit_durability_group_commit.)
It is possible to flush transactions to disk more aggressively if
desired.
-See @ref durability_flush_config.
+See @ref commit_durability_flush_config.
-@section durability_recovery Recovery
+@section commit_durability_recovery Recovery
When the transactional log is enabled, calling ::wiredtiger_open
automatically performs a recovery step when opening the database.
@@ -67,7 +77,7 @@ Therefore, applications using commit-level durability must configure
extensions with the \c extensions keyword to ::wiredtiger_open consistently
whenever re-opening the database.
-@section durability_checkpoint Checkpoints
+@section commit_durability_checkpoint Checkpoints
When using commit-level durability one should still perform
checkpoints of the database.
@@ -80,15 +90,16 @@ be processed.
Checkpoints can be done either explicitly by the application or periodically
based on elapsed time or data size with the \c checkpoint configuration to
-::wiredtiger_open. The period between checkpoints can be defined in seconds via
-\c wait, as the number of bytes written to the log since the last checkpoint via
-\c log_size, or both. If both periods are defined then the checkpoint occurs as
-soon as either threshold has occurred and both are reset once the checkpoint is
-complete. If using \c log_size to scheduled automatic checkpoints, we recommend
-the size selected be a multiple of the physical size of the underlying log file
-to more easily support automatic log file removal.
+::wiredtiger_open. The period between checkpoint completion and the start of
+a subsequent checkpoint can be set in seconds via \c wait, as the number of
+bytes written to the log since the last checkpoint via \c log_size, or both.
+If both are set then the checkpoint occurs as soon as either threshold has
+occurred and both are reset once the checkpoint is complete. If using
+\c log_size to schedule automatic checkpoints, we recommend the size selected
+be a multiple of the physical size of the underlying log file to more easily
+support automatic log file removal.
-@section durability_backup Backups
+@section commit_durability_backup Backups
Backups are done using backup cursors (see @ref backup for more information).
@@ -98,20 +109,20 @@ files referenced in the logs might not be found. Applications should either
copy all objects and log files if commit-level durability of the copied database
is required, or alternatively, copy only selected objects when backing up and
not copy log files at all, then fall back to checkpoint durability when
-switching to the backup.
+activating the backup.
-@section durability_archiving Log file archival and removal
+@section commit_durability_archiving Log file archival and removal
WiredTiger log files are named "WiredTigerLog.[number]" where "[number]"
-is a 10-digit value, for example WiredTigerLog.0000000001". The log
-file with the largest number in its name is the most recent log file
-written. The log file size can be set using the \c log configuration
+is a 10-digit value, for example <tt>WiredTigerLog.0000000001</tt>.
+The log file with the largest number in its name is the most recent log
+file written. The log file size can be set using the \c log configuration
to ::wiredtiger_open.
By default, WiredTiger automatically removes log files no longer required for
recovery. Applications wanting to archive log files instead (for example, to
support catastrophic recovery), must disable log file removal using the
-::wiredtiger_open \c log=(remove=false) configuration.
+::wiredtiger_open \c "log=(remove=false)" configuration.
Log files may be removed or archived after a checkpoint has completed,
as long as there is no backup in progress. When performing @ref
@@ -122,7 +133,7 @@ Immediately after the checkpoint has completed, only the most recent log file
is needed for recovery, and all other log files can be removed or archived.
Note that there must always be at least one log file for the database.
-@section log_cursors Log cursors
+@section commit_log_cursors Log cursors
Applications can independently read and write WiredTiger log files for their own
purposes (for example, inserting debugging records), using the standard
@@ -134,16 +145,16 @@ Applications manually removing log files should take care that no log cursors
are opened in the log when removing files or errors may occur when trying to
read a log record in a file that was removed.
-@section durability_bulk Bulk loads
+@section commit_durability_bulk Bulk loads
@copydoc doc_bulk_durability
-@section durability_tuning Tuning commit-level durability
+@section commit_durability_tuning Tuning commit-level durability
-@subsection durability_group_commit Group commit
+@subsection commit_durability_group_commit Group commit
@copydoc doc_tune_durability_group_commit
-@subsection durability_flush_config Flush call configuration
+@subsection commit_durability_flush_config Flush call configuration
@copydoc doc_tune_durability_flush_config
*/
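A sketch of the configuration discussed on this page: logging enabled globally, periodic checkpoints scheduled by wait/log_size, and one table opting back into checkpoint-level durability (names and sizes are illustrative):

    #include <wiredtiger.h>

    int
    open_with_logging(WT_CONNECTION **connp, WT_SESSION **sessionp)
    {
        int ret;

        /* Log all tables; checkpoint every 60 seconds or 2GB of log data. */
        if ((ret = wiredtiger_open("WT_HOME", NULL,
            "create,log=(enabled),checkpoint=(wait=60,log_size=2GB)",
            connp)) != 0)
            return (ret);
        if ((ret = (*connp)->open_session(*connp, NULL, NULL, sessionp)) != 0)
            return (ret);

        /* This table keeps checkpoint-level durability only. */
        return ((*sessionp)->create(*sessionp, "table:cache_only",
            "key_format=S,value_format=S,log=(enabled=false)"));
    }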
diff --git a/src/third_party/wiredtiger/src/docs/explain-durability.dox b/src/third_party/wiredtiger/src/docs/durability-overview.dox
index 660e136ff83..56986e982a1 100644
--- a/src/third_party/wiredtiger/src/docs/explain-durability.dox
+++ b/src/third_party/wiredtiger/src/docs/durability-overview.dox
@@ -1,4 +1,4 @@
-/*! @page explain_durability Tutorial: durability models
+/*! @page durability_overview Durability overview
<i>Durability</i> refers to the property that a transaction, once committed,
should be permanent and changes will never be lost. This can mean a number of
diff --git a/src/third_party/wiredtiger/src/docs/explain-acid.dox b/src/third_party/wiredtiger/src/docs/explain-acid.dox
index a39e8ce7fbe..e633bc94a9b 100644
--- a/src/third_party/wiredtiger/src/docs/explain-acid.dox
+++ b/src/third_party/wiredtiger/src/docs/explain-acid.dox
@@ -3,13 +3,13 @@
Transactions provide a powerful abstraction for multiple threads to
operate on data concurrently because they have the following properties:
-- Atomicity: all or none of a transaction is completed.
-- Consistency: if each transaction maintains some property when considered
+- \b Atomicity: all or none of a transaction is completed.
+- \b Consistency: if each transaction maintains some property when considered
separately, then the combined effect of executing the transactions
concurrently will maintain the same property.
-- Isolation: every execution of a transaction is equivalent to one
+- \b Isolation: every execution of a transaction is equivalent to one
that runs single-threaded.
-- Durability: once a transaction is fully complete, changes it made
+- \b Durability: once a transaction is fully complete, changes it made
cannot be lost.
These properties greatly simplify the reasoning required for
@@ -19,7 +19,7 @@ single-threaded and independently.
When the data operations in a transaction are complete, the
application will normally <i>commit</i> it.
-This causes it to "take effect".
+This causes it to "take effect."
In keeping with the atomicity property, a transaction that fails
during execution, or that fails to commit due to a conflict with
another running transaction, is then <i>aborted</i> and any changes
diff --git a/src/third_party/wiredtiger/src/docs/programming.dox b/src/third_party/wiredtiger/src/docs/programming.dox
index 43925ad894b..0e23ce2bf18 100644
--- a/src/third_party/wiredtiger/src/docs/programming.dox
+++ b/src/third_party/wiredtiger/src/docs/programming.dox
@@ -19,8 +19,9 @@ each of which is ordered by one or more columns.
<h2>Transactions</h2>
- @subpage transactions
- @subpage transactions_api
-- @subpage checkpoint
-- @subpage durability
+- @subpage durability_overview
+- @subpage durability_checkpoint
+- @subpage durability_log
- @subpage timestamp_model
- @subpage timestamp_global_api
- @subpage timestamp_txn_api
@@ -28,7 +29,6 @@ each of which is ordered by one or more columns.
- @subpage timestamp_prepare_roundup
- @subpage timestamp_misc
- @subpage explain_acid
-- @subpage explain_durability
- @subpage explain_isolation
<h2>Storage options</h2>
diff --git a/src/third_party/wiredtiger/src/docs/timestamp-global.dox b/src/third_party/wiredtiger/src/docs/timestamp-global.dox
index 3a895494156..7bddc1c4345 100644
--- a/src/third_party/wiredtiger/src/docs/timestamp-global.dox
+++ b/src/third_party/wiredtiger/src/docs/timestamp-global.dox
@@ -14,13 +14,15 @@ allowed to read. The historic values of data modified before this time can no
longer be read by new transactions. (Transactions already in progress
are not affected when the \c oldest_timestamp changes.)
-The stable timestamp is the earliest time at which data is considered
-fully stable. It is saved along with every checkpoint, and that saved
-time is the point to which the database is recovered after a crash.
-It is also the earliest point to which the database can be returned via an
-explicit WT_CONNECTION::rollback_to_stable call.
-(See @ref timestamp_misc_rts.)
-All transactions must commit after the current \c stable timestamp.
+The stable timestamp is the earliest time at which data is considered stable.
+(Data is said to be stable when it is not only durable, but additionally,
+transactions committed at or before the stable time cannot be rolled back by
+application-level transaction management.) The stable timestamp is saved
+along with every checkpoint, and that saved time is the point to which the
+database is recovered after a crash. It is also the earliest point to which
+the database can be returned via an explicit WT_CONNECTION::rollback_to_stable
+call. (See @ref timestamp_misc_rts.) All transactions must commit after
+the current \c stable timestamp.
Applications are responsible for managing these timestamps and
periodically updating them.
@@ -40,7 +42,7 @@ WT_CONNECTION::set_timestamp method, including constraints.
| Timestamp | Constraints | Description |
|-----------|-------------|-------------|
-| durable_timestamp | <= oldest | Reset the maximum durable timestamp |
+| durable_timestamp | <= oldest | Reset the maximum durable timestamp (see @ref timestamp_prepare for discussion of the durable timestamp). |
| oldest_timestamp | <= stable; may not move backward, set to the value as of the last checkpoint during recovery | Inform the system future reads and writes will never be earlier than the specified timestamp. |
| stable_timestamp | may not move backward, set to the recovery timestamp during recovery | Inform the system checkpoints should not include commits newer than the specified timestamp. |
@@ -61,7 +63,7 @@ in that case.
Setting \c oldest_timestamp indicates future read timestamps will be at
least as recent as the timestamp, allowing WiredTiger to discard history before
-the specified point. It is not required there be no currently active readers at
+the specified point. It is not required that there be no currently active readers at
earlier timestamps: this setting only indicates future application needs.
In other words, as active readers age out of the system, historic data up to the
oldest timestamp will be discarded, but no historic data at or after the \c
@@ -83,18 +85,12 @@ The \c oldest_timestamp must be less than or equal to the \c stable_timestamp.
The \c stable_timestamp determines the timestamp for subsequent checkpoints. In
other words, updates to an object after the stable timestamp will not be included in a
future checkpoint. Because tables in a timestamp world are generally using
-checkpoint durability, the \c stable_timestamp also determines to point to which
+checkpoint durability, the \c stable_timestamp also determines the point to which
recovery will be done after failures.
During recovery, the \c stable_timestamp is set to the value to which recovery is
performed.
-The use of the \c stable_timestamp for checkpoints can be overridden in the call
-to WT_SESSION::checkpoint.
-
-<!-- XXX
-OVERRIDING THE STABLE ts IN CHECKPOINT NEEDS MOTIVATION AND DISCUSSION -->
-
It is possible to explicitly roll back to a time after, or equal to, the current
\c stable_timestamp using the WT_CONNECTION::rollback_to_stable method.
(See @ref timestamp_misc_rts.)
@@ -107,6 +103,13 @@ performance.
Attempting to set the \c stable_timestamp to a value earlier than its current
value will be silently ignored.
+Using the stable timestamp in the checkpoint is not required, and can be
+overridden using the <tt>use_timestamp=false</tt> configuration of the
+WT_SESSION::checkpoint call. This is not intended for general use, but can
+be useful for backup scenarios where rolling back to a stable timestamp
+isn't possible and it's useful for a checkpoint to contain the most recent
+possible data.
+
@subsection timestamp_global_forcing Forcing global timestamps
\warning
@@ -128,11 +131,11 @@ the WT_CONNECTION::set_timestamp method, including constraints.
| Timestamp | Constraints | Description |
|-----------|-------------|-------------|
-| all_durable | None | The largest timestamp such that all timestamps up to that value have been made durable. |
+| all_durable | None | The largest timestamp such that all timestamps up to that value have been made durable (see @ref timestamp_prepare for discussion of the durable timestamp). |
| last_checkpoint | <= stable | The stable timestamp at which the last checkpoint ran (or 0 if no checkpoints have run). |
| oldest_reader | None | The timestamp of the oldest currently active read transaction. |
| oldest_timestamp | <= stable | The current application-set \c oldest_timestamp value. |
-| pinned | <= oldest | The minimum of the \c oldest_timestamp and the read timestamps of all active readers. |
+| pinned | <= oldest | The minimum of the \c oldest_timestamp and the oldest active reader. |
| recovery | <= stable | The stable timestamp used in the most recent checkpoint prior to the last shutdown (or 0 if none available). |
| stable_timestamp | None | The current application-set \c stable_timestamp value. |
@@ -193,13 +196,11 @@ application.
@subsection timestamp_global_query_api_pinned Reading the "pinned" timestamp
-The \c pinned timestamp is the minimum of \c oldest_timestamp and the read
-timestamps of all active readers, including any running checkpoint. It is not
-the same as \c oldest_timestamp
-because the oldest timestamp can be advanced past currently active readers,
-leaving a reader as the earliest timestamp in the system. Applications
-can use the \c pinned timestamp to understand the earliest data required by any
-reader in the system.
+The \c pinned timestamp is the minimum of \c oldest_timestamp and the oldest active
+reader, including any running checkpoint. It is not the same as \c oldest_timestamp
+because the oldest timestamp can be advanced past currently active readers, leaving
+a reader as the earliest timestamp in the system. Applications can use the \c pinned
+timestamp to understand the earliest data required by any reader in the system.
The \c pinned timestamp is read-only.
@@ -207,7 +208,7 @@ The \c pinned timestamp is read-only.
The \c recovery timestamp is the stable timestamp to which recovery was
performed on startup. Applications can use the \c recovery timestamp to
-determine the stable timestamp of the system on startup.
+retrieve the value the stable timestamp had at system startup.
The \c recovery timestamp is read-only.
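A sketch of setting and querying the global timestamps covered above; the hexadecimal values are arbitrary and the buffer only needs to hold a 16-digit hex timestamp plus a terminating NUL:

    #include <stdio.h>
    #include <wiredtiger.h>

    void
    manage_global_timestamps(WT_CONNECTION *conn)
    {
        char ts[32];

        /* Advance oldest and stable; oldest must be <= stable. */
        (void)conn->set_timestamp(conn,
            "oldest_timestamp=10,stable_timestamp=20");

        /* Read back the pinned timestamp (oldest_timestamp or oldest reader). */
        if (conn->query_timestamp(conn, ts, "get=pinned") == 0)
            printf("pinned timestamp: %s\n", ts);
    }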
diff --git a/src/third_party/wiredtiger/src/docs/timestamp-misc.dox b/src/third_party/wiredtiger/src/docs/timestamp-misc.dox
index 14f266baca0..5724a9fe814 100644
--- a/src/third_party/wiredtiger/src/docs/timestamp-misc.dox
+++ b/src/third_party/wiredtiger/src/docs/timestamp-misc.dox
@@ -4,18 +4,16 @@
Applications setting timestamps for a transaction have to comply with the
constraints based on the global timestamp state. In order to be compliant with
-the constraints applications will need to query the global timestamp state and
-check their timestamps for compliance and adjust timestamps if required, and
-this creates obvious races with other threads. To reduce application burden,
-WiredTiger supports automatic timestamp rounding in some scenarios.
+the constraints, applications will need to query the global timestamp state,
+check their timestamps for compliance, and adjust timestamps if required.
+To reduce application burden, WiredTiger supports automatic timestamp rounding
+in some scenarios.
-Applications can configure
-<code>roundup_timestamps=(prepared=true,read=true)</code> with
-the WT_SESSION::begin_transaction method.
-
-Configuring <code>roundup_timestamps=(read=true)</code> causes the read
-timestamp to be rounded up to the oldest timestamp. If the read timestamp is
-greater than the oldest timestamp no change will be made.
+Applications can configure <code>roundup_timestamps=(read=true)</code>
+with the WT_SESSION::begin_transaction method. Configuring
+<code>roundup_timestamps=(read=true)</code> causes the read timestamp to be
+rounded up to the oldest timestamp. If the read timestamp is greater than
+the oldest timestamp no change will be made.
@section timestamp_misc_rts Using rollback-to-stable with timestamps
@@ -29,7 +27,7 @@ stable timestamp.
Logged tables and updates made without an associated commit timestamp are
unaffected.
-The database must be quiescent during this process. Applications should close
+The database must be quiescent during this process, and applications must close
or reset all open cursors before calling the WT_CONNECTION::rollback_to_stable
method.
@@ -56,7 +54,7 @@ with the WT_SESSION::create method's \c assert configuration. The
be used on reads in the table. The \c "assert(write_timestamp)" configuration
requires update timestamps conform to the \c write_timestamp_usage setting. If
WiredTiger detects a violation of the configured policy, an error message
-will be logged, in all cases. Additionally, in diagnostic builds, the library
+will be logged. Additionally, in diagnostic builds, the library
will fail and drop core at the failing check.
\warning
@@ -78,7 +76,7 @@ Timestamps are supported for in-memory databases, but must be configured as in
ordinary databases, and the same APIs are used in both cases for historical
reasons. By default, in-memory database objects behave like commit-level
objects in ordinary databases, that is, timestamps are ignored. If logging
-is disabled for the object, using the \c log=(enabled=false) configuration,
+is disabled for the object, using the \c "log=(enabled=false)" configuration,
then the timestamps will not be ignored and will behave as with objects in
ordinary databases where logging has been disabled.
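A sketch of the read-timestamp rounding configuration described above; the timestamp value is illustrative:

    #include <wiredtiger.h>

    int
    read_at_or_after_oldest(WT_SESSION *session)
    {
        /* Without roundup, a read timestamp older than "oldest" is an error;
         * with it, the timestamp is silently rounded up to "oldest". */
        return (session->begin_transaction(
            session, "read_timestamp=5,roundup_timestamps=(read=true)"));
    }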
diff --git a/src/third_party/wiredtiger/src/docs/timestamp-model.dox b/src/third_party/wiredtiger/src/docs/timestamp-model.dox
index bf1a9f75b35..a87a87d4a89 100644
--- a/src/third_party/wiredtiger/src/docs/timestamp-model.dox
+++ b/src/third_party/wiredtiger/src/docs/timestamp-model.dox
@@ -32,7 +32,7 @@ timestamp_txn_api for a full explanation.
Timestamps are 64-bit unsigned integers naming a point in application time.
WiredTiger does not interpret timestamps other than expecting larger timestamps
-to correspond to "later" times. Timestamp 0 is reserved, so timestamps must
+to correspond to later times. Timestamp 0 is reserved, so timestamps must
start at 1 or greater. It is not necessary for timestamp values to be clock time
of any kind; an expected timestamp source is a global counter shared by
instances of an application distributed across a network, individually running
diff --git a/src/third_party/wiredtiger/src/docs/timestamp-prepare-roundup.dox b/src/third_party/wiredtiger/src/docs/timestamp-prepare-roundup.dox
index 18efd054a9c..97334475662 100644
--- a/src/third_party/wiredtiger/src/docs/timestamp-prepare-roundup.dox
+++ b/src/third_party/wiredtiger/src/docs/timestamp-prepare-roundup.dox
@@ -1,60 +1,51 @@
/*! @page timestamp_prepare_roundup Automatic prepare timestamp rounding
-Prepared transactions have their own configuration keyword for rounding
-timestamps.
-
@section timestamp_prepare_roundup_replay Replaying prepared transactions by rounding up the prepare timestamp
-It is possible for a system crash to cause a prepared transaction to
-be rolled back.
-Because the durable timestamp of a transaction is permitted to be
-later than its commit timestamp, it is even possible for a system crash to
-cause a prepared and committed transaction to be rolled back.
-Part of the purpose of the timestamp interface is to allow such
-transactions to be replayed at the same time during an
-application-level recovery phase.
+Prepared transactions have a configuration keyword for rounding timestamps.
+Applications can configure <code>roundup_timestamps=(prepare=true)</code>
+with the WT_SESSION::begin_transaction method.
-Under ordinary circumstances this is purely an application concern.
-However, because it is also allowed for the stable timestamp to move
-forward after a transaction prepares, strict enforcement of the
-timestamping rules can make replaying prepared transactions at the
-same time impossible.
+It is possible for a system crash to cause a prepared transaction to be
+rolled back. Because the durable timestamp of a transaction is permitted
+to be later than the prepared transaction's commit timestamp, it is even
+possible for a system crash to cause a prepared and committed transaction
+to be rolled back. Part of the purpose of the timestamp interface is to
+allow such transactions to be replayed at their original timestamps during
+an application-level recovery phase.
-The setting <code>roundup_timestamps=(prepared=true)</code> is
-provided to allow handling this situation.
-It disables the normal restriction that the prepare timestamp must be
-greater than the stable timestamp.
-In addition, the prepare timestamp is rounded up to the <i>oldest</i>
-timestamp (not the stable timestamp) if necessary and then the commit
-timestamp is rounded up to the prepare timestamp.
-The rounding provides some measure of safety by disallowing operations
-before oldest.
+Under ordinary circumstances this is purely an application concern. However,
+because it is also allowed for the stable timestamp to move forward after a
+transaction prepares, strict enforcement of the timestamping rules can make
+replaying prepared transactions at the same time impossible.
-Arguably the name of the setting should be more descriptive of the
-full behavior.
+The setting <code>roundup_timestamps=(prepared=true)</code> is provided to
+handle this problem. It disables the normal restriction that the prepare
+timestamp must be greater than the stable timestamp. In addition, the
+prepare timestamp is rounded up to the <i>oldest</i> timestamp (not the
+stable timestamp) if necessary and then the commit timestamp is rounded up
+to the prepare timestamp. The rounding provides some measure of safety by
+disallowing operations before oldest.
\warning
-This setting is an extremely sharp knife.
-It is safe to replay a prepared transaction at its original time,
-regardless of the stable timestamp, as long as this is done during an
-application recovery phase after a crash and before any ordinary
-operations are allowed.
-Using this setting to prepare and/or commit before the stable
-timestamp for any other purpose can lead to data inconsistency.
-Likewise, replaying anything other than the exact transaction that
-successfully prepared before the crash can lead to subtle
-inconsistencies.
-If in any doubt it is far safer to either abort the transaction (this
-requires no further action in WiredTiger) or not allow stable to
-advance past a transaction that has prepared.
+This setting is dangerous. It is safe to replay a prepared transaction at
+its original timestamps, regardless of the current stable timestamp, as
+long as it is done during an application recovery phase after a crash and
+before any ordinary operations are allowed. Using this setting to prepare
+and/or commit before the current stable timestamp for any other purpose
+can lead to data inconsistency. Likewise, replaying anything other than the
+exact transaction that successfully prepared before the crash can lead to
+subtle inconsistencies. If in any doubt, it is far safer to either abort the
+transaction (this requires no further action in WiredTiger) or not allow the
+stable timestamp to advance past the commit timestamp of a transaction that
+has been prepared.
@section timestamp_prepare_roundup_safety Safety rationale and details
-When a transaction is prepared and rolled back by a crash, then
-replayed, this creates a period of time (execution time, not timestamp
-time) where it is not there.
-Reads or writes made during this period that intersect with the
-transaction will not see it and thus will produce incorrect results.
+When a transaction is prepared and rolled back by a crash, then replayed,
+this creates a period of execution time where the transaction's updates will
+not appear. Reads or writes made during this period that intersect with
+the transaction will not see it and can therefore produce incorrect results.
An <i>application recovery phase</i> is a startup phase in application
code that is responsible for returning the application to a running
@@ -66,16 +57,13 @@ The important property is that only application-level recovery code
executes, and that code is expected to be able to take account of
special circumstances related to recovery.
-It is safe to replay a prepared transaction during an application
-recovery phase because nothing can make intersecting reads or writes
-during the period the prepared transaction is missing, and once it has
-been replayed it covers the exact same region of the database as
-before the crash, so any further intersecting reads or writes will
-behave the same as if they had been performed before the crash.
-(If for some reason the application recovery code itself needs to read
-the affected region of the database before replaying a prepared
-transaction, it is then responsible for compensating for its temporary
-absence somehow.)
+It is safe to replay a prepared transaction during an application recovery
+phase if nothing makes intersecting reads or writes during the period the
+prepared transaction is missing and the replay makes the exact same updates
+as before the crash, so any subsequent intersecting reads or writes will
+behave the same as if they had been performed before the crash. (If the
+application recovery code itself makes intersecting reads before replaying
+a prepared transaction, it is responsible for compensating.)
Because a transaction's durable timestamp is allowed to be
later than its commit timestamp, it is possible for a transaction to
@@ -90,11 +78,10 @@ before the crash, it is important to replay exactly the same write
set; otherwise reads before and after the crash might produce
::WT_PREPARE_CONFLICT inconsistently.
-It is expected that the oldest timestamp is not advanced during
-application recovery.
-The rounding behavior does not check for this possibility; if for some
-reason applications wish to advance oldest while replaying
-transactions during recovery, they must check their commit timestamps
-explicitly to avoid committing before oldest.
+It is expected the oldest timestamp will not advance during application
+recovery. The rounding behavior does not check for this possibility; if for
+some reason applications wish to advance oldest while replaying transactions
+during recovery, they must check their commit timestamps explicitly to avoid
+committing before oldest.
*/
diff --git a/src/third_party/wiredtiger/src/docs/timestamp-prepare.dox b/src/third_party/wiredtiger/src/docs/timestamp-prepare.dox
index 4877db0746e..55dac15dbf8 100644
--- a/src/third_party/wiredtiger/src/docs/timestamp-prepare.dox
+++ b/src/third_party/wiredtiger/src/docs/timestamp-prepare.dox
@@ -10,10 +10,9 @@ transaction, which will be used for visibility checks until the transaction is
committed or aborted. Once a transaction has been prepared no further data
operations are permitted, and the transaction must next be resolved by calling
WT_SESSION::commit_transaction or WT_SESSION::rollback_transaction.
-Calling WT_SESSION::prepare_transaction only
-guarantees that transactional conflicts will not cause the transaction to
-roll back and specifically does not guarantee the transaction's updates are
-durable.
+Calling WT_SESSION::prepare_transaction only guarantees that a subsequent
+WT_SESSION::commit_transaction will succeed and specifically does not
+guarantee the transaction's updates are durable.
If a read operation encounters an update from a prepared transaction,
the error ::WT_PREPARE_CONFLICT will be returned indicating it is not possible to
@@ -53,6 +52,7 @@ reading its writes in a second transaction and then committing other writes such
that the second transaction becomes durable before the first can produce data
inconsistency.
+\warning
In this scenario the second transaction depends on the first; thus it must be
rolled back if the first transaction is rolled back; thus it must not become
durable before the first transaction.
@@ -64,6 +64,7 @@ this scenario cannot occur.
transaction is also its durable timestamp, and committing with no timestamp is
roughly comparable to committing at the current stable timestamp.)
+\warning
This scenario is not currently detected by WiredTiger; applications are
responsible for avoiding it.
In future versions such transactions might fail.
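A sketch of the prepared-transaction lifecycle described on this page, with arbitrary hexadecimal timestamps and minimal error handling:

    #include <wiredtiger.h>

    int
    prepared_commit(WT_SESSION *session, WT_CURSOR *cursor)
    {
        int ret;

        if ((ret = session->begin_transaction(session, NULL)) != 0)
            return (ret);

        cursor->set_key(cursor, "key");
        cursor->set_value(cursor, "value");
        if ((ret = cursor->insert(cursor)) != 0) {
            (void)session->rollback_transaction(session, NULL);
            return (ret);
        }

        /* Prepare: no further data operations are allowed after this point. */
        if ((ret = session->prepare_transaction(
                 session, "prepare_timestamp=30")) != 0) {
            (void)session->rollback_transaction(session, NULL);
            return (ret);
        }

        /* Resolve: commit >= prepare, durable >= commit. */
        return (session->commit_transaction(
            session, "commit_timestamp=30,durable_timestamp=40"));
    }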
diff --git a/src/third_party/wiredtiger/src/docs/timestamp-txn.dox b/src/third_party/wiredtiger/src/docs/timestamp-txn.dox
index e782dc75ce9..5b35258a313 100644
--- a/src/third_party/wiredtiger/src/docs/timestamp-txn.dox
+++ b/src/third_party/wiredtiger/src/docs/timestamp-txn.dox
@@ -14,19 +14,22 @@ With a read timestamp, reads will occur as of the specified time.
The transaction's commit timestamp is the time at which the transaction takes
effect. This is the time at which other transactions, with appropriately set
read timestamps, will see the transaction's writes instead of any previous
-value.
+value. Applications can also set commit timestamps on a per-update basis in
+a single transaction, in which case the commit timestamp is the time of the
+visibility of the affected updates.
Updating a key without a commit timestamp creates a value that has "always
-existed". This makes sense when loading initial data into an object or in
-applications wishing to clear historic values, but once timestamps are
-used to update a particular value, subsequent updates will likely also
-use a commit timestamp. Updating a key with a commit timestamp and then
-subsequently updating it without a commit timestamp will discard all prior
-historical values, and future reads will read the new value regardless of
-read timestamp. In other words, readers with already acquired snapshots will
-see prior historical values based on their timestamps. Readers acquiring
-a snapshot after the commit of the update without a timestamp will not see
-prior historical values regardless of their read timestamps.
+existed" and is visible regardless of timestamp. This makes sense when
+loading initial data into an object or in applications wishing to clear
+historic values, but once timestamps are used to update a particular value,
+subsequent updates will likely also use a commit timestamp. Updating a key
+with a commit timestamp and then subsequently updating it without a commit
+timestamp will discard all prior historical values, and future reads will
+read the new value regardless of read timestamp. In other words, readers
+with already acquired snapshots will see prior historical values based
+on their timestamps. Readers acquiring a snapshot after the commit of the
+update without a timestamp will not see prior historical values regardless
+of their read timestamps.
@section timestamp_txn_api_configure Enforcing application timestamp behavior
@@ -55,7 +58,7 @@ The following table lists the transaction's timestamps and behaviors that can be
| Timestamp | Constraint | Description |
|-----------|------------|-------------|
-| read_timestamp | >= oldest or >= pinned | the transaction's read timestamp, see @ref timestamp_txn_api_read_timestamp for details |
+| read_timestamp | >= oldest | the transaction's read timestamp, see @ref timestamp_txn_api_read_timestamp for details |
| roundup_timestamps | None | boolean setting for timestamp auto-adjustments, see @ref timestamp_prepare_roundup and @ref timestamp_read_roundup for details |
@section timestamp_txn_api_commit Configuring transaction timestamp information with WT_SESSION::commit_transaction
@@ -87,7 +90,7 @@ points in the transaction's lifetime, using WT_SESSION::timestamp_transaction:
| commit_timestamp | > stable and >= prepare and >= any system read timestamp | the transaction's commit timestamp, see @ref timestamp_txn_api_commit_timestamp for details |
| durable_timestamp | >= commit | the transaction's durable timestamp, only applicable to prepared transactions, see @ref timestamp_prepare for details |
| prepare_timestamp | > stable and >= any system read timestamp | the transaction's prepare timestamp, see @ref timestamp_prepare for details |
-| read_timestamp | >= oldest or >= pinned | the transaction's read timestamp, see @ref timestamp_txn_api_read_timestamp for details |
+| read_timestamp | >= oldest | the transaction's read timestamp, see @ref timestamp_txn_api_read_timestamp for details |
@section timestamp_txn_api_commit_timestamp Setting the transaction's commit timestamp
@@ -133,9 +136,8 @@ timestamp order are expected to explicitly fail in future releases.
Some diagnostic tools are available to help enforce this constraint;
see @ref timestamp_misc_diagnostic.
-For prepared transactions, the commit timestamp must not be less than the
-prepare timestamp. Otherwise, the commit
-timestamp must not be less than the system's stable timestamp.
+For prepared transactions, the commit timestamp must not be before the prepare
+timestamp. Otherwise, the commit timestamp must be after the stable timestamp.
@section timestamp_txn_api_read_timestamp Setting the transaction's read timestamp
@@ -146,9 +148,8 @@ a newer timestamp, of course.),
The read timestamp may be set to any time equal to or after the system's
\c oldest timestamp.
-This restriction is enforced and
-applications can rely on an error return to detect attempts to set the read
-timestamp older than the \c oldest timestamp.
+This restriction is enforced and applications can rely on an error return to
+detect attempts to set the read timestamp older than the \c oldest timestamp.
The read timestamp may only be set once in the lifetime of a transaction.
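A sketch combining a read timestamp at WT_SESSION::begin_transaction with a commit timestamp set later through WT_SESSION::timestamp_transaction; the key, value, and timestamps are illustrative:

    #include <wiredtiger.h>

    int
    timestamped_update(WT_SESSION *session, WT_CURSOR *cursor)
    {
        int ret;

        /* Read as of timestamp 14; see everything committed at or before it. */
        if ((ret = session->begin_transaction(
                 session, "read_timestamp=14")) != 0)
            return (ret);

        cursor->set_key(cursor, "key");
        cursor->set_value(cursor, "new value");
        if ((ret = cursor->update(cursor)) != 0) {
            (void)session->rollback_transaction(session, NULL);
            return (ret);
        }

        /* The commit timestamp can also be set later, before committing. */
        (void)session->timestamp_transaction(session, "commit_timestamp=1e");
        return (session->commit_transaction(session, NULL));
    }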
diff --git a/src/third_party/wiredtiger/src/docs/transactions.dox b/src/third_party/wiredtiger/src/docs/transactions.dox
index 1607b7a47ca..9ea1faee4c2 100644
--- a/src/third_party/wiredtiger/src/docs/transactions.dox
+++ b/src/third_party/wiredtiger/src/docs/transactions.dox
@@ -1,7 +1,7 @@
/*! @page transactions Transactional applications
-WiredTiger offers standard ACID-style transaction support where operations are
-done at snapshot isolation and subsequently become durable. (Readers not
+WiredTiger offers standard ACID-style transaction support where modifications happen
+at snapshot isolation and subsequently become durable. (Readers not
already familiar with these concepts may wish to see @ref explain_acid for a brief
discussion of ACID and @ref explain_isolation for a brief discussion of isolation
levels.)
@@ -11,9 +11,10 @@ There are three approaches to writing transactional programs in WiredTiger:
applications with simple transactions where updates become durable when
the last reference to the object is closed or at the next database checkpoint.
--# Applications supporting \b commit-level durability, which extends checkpoint-level
-durability, adding logging to the database so updates are durable as
-soon as their transaction successfully commits.
+-# Applications supporting \b commit-level durability, which extends
+checkpoint-level durability, adding logging to the database so updates written
+on behalf of a transaction become durable as soon as the transaction's log
+records become durable.
-# Applications using timestamps for fine-grained control of the database,
extending checkpoint-level durability. This allows applications to do things
@@ -34,17 +35,18 @@ the first approach and iterating to a complete application is recommended.
We will discuss these approaches in order, from the simplest to the most
complex.
-If this is your first WiredTiger application, please read @ref explain_durability
+If this is your first WiredTiger application, please read @ref durability_overview
for a discussion of WiredTiger durability models, so you can select the application
-architecture that's appropriate for your needs.
+architecture that is appropriate for your needs.
\warning
-Recovery is the process of restoring the database to a consistent state after
-failure. Recovery is automatically performed by WiredTiger, as necessary, when a
-database is opened. Recovery is required after the failure of any thread of
-control in the application, where the failed thread might have been executing
-inside of the WiredTiger library or open WiredTiger handles have been lost. If
-any application thread of control exits unexpectedly while holding any database
-resources of any kind, the application should close and re-open the database.
+Recovery is the process of restoring the database to a transactionally
+consistent state after failure. Recovery is automatically performed by
+WiredTiger, as necessary, when a database is opened. Recovery is required
+after the failure of any thread of control in the application, where the
+failed thread might have been executing inside of the WiredTiger library
+or open WiredTiger handles have been lost. If any application thread of
+control exits unexpectedly while holding any database resources of any kind,
+the application should close and re-open the database.
*/
diff --git a/src/third_party/wiredtiger/src/docs/transactions_api.dox b/src/third_party/wiredtiger/src/docs/transactions_api.dox
index 84a09447e0e..e3083aaac59 100644
--- a/src/third_party/wiredtiger/src/docs/transactions_api.dox
+++ b/src/third_party/wiredtiger/src/docs/transactions_api.dox
@@ -33,9 +33,11 @@ violation of consistency or isolation.) Failures of this form produce the error
current transaction using WT_SESSION::rollback_transaction, and either abandon
the operation or retry the entire operation by starting a new transaction.
-The WT_SESSION::rollback_transaction method resets all cursors in the session
-(as if the WT_CURSOR::reset method was called), discarding any cursor position
-as well as any currently set keys or values they may have.
+After a transaction is successfully committed, cursors in the session retain
+their position, as well as any currently set keys or values they may have.
+If a transaction is rolled back for any reason, cursors in the session are
+reset (as if the WT_CURSOR::reset method was called), discarding any cursor
+position as well as any currently set keys or values.
@snippet ex_all.c transaction commit/rollback
@@ -67,7 +69,7 @@ allocated to hold the data required to satisfy transactional readers, operations
may fail and return ::WT_ROLLBACK.
\warning
-As read transactions rarely fail, application developers may fail to correctly
+As read transactions rarely fail, application developers may neglect to correctly
handle a read transaction return of ::WT_ROLLBACK. Applications must be written
to anticipate read failure.
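To illustrate the WT_ROLLBACK handling and cursor-reset behavior described in the transactions_api.dox hunk above, here is a hedged sketch (not part of this commit); the do_operation callback is hypothetical and stands in for the application's transactional work.

#include <wiredtiger.h>

/*
 * Run a transactional operation, retrying if it fails with WT_ROLLBACK.
 * After a rollback, cursors in the session have been reset, so the
 * operation must re-position any cursors it uses on each attempt.
 */
int
run_with_retry(WT_SESSION *session, int (*do_operation)(WT_SESSION *))
{
    int ret;

    for (;;) {
        if ((ret = session->begin_transaction(session, NULL)) != 0)
            return (ret);
        if ((ret = do_operation(session)) != 0)
            /* Discard the failed transaction, resetting the session's cursors. */
            (void)session->rollback_transaction(session, NULL);
        else
            /* If commit fails, it has already discarded the transaction. */
            ret = session->commit_transaction(session, NULL);
        if (ret == 0)
            return (0);
        if (ret != WT_ROLLBACK)
            return (ret);
        /* WT_ROLLBACK: abandon this attempt and retry with a new transaction. */
    }
}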
diff --git a/src/third_party/wiredtiger/src/docs/tune-cache.dox b/src/third_party/wiredtiger/src/docs/tune-cache.dox
index fc42fc80046..78e4b8181d9 100644
--- a/src/third_party/wiredtiger/src/docs/tune-cache.dox
+++ b/src/third_party/wiredtiger/src/docs/tune-cache.dox
@@ -98,7 +98,7 @@ same level.
By default, WiredTiger cache eviction is handled by a single, separate
thread. In a large, busy cache, a single thread will be insufficient
(especially when the eviction thread must wait for I/O). The
-\c eviction=(threads_min) and \c eviction=(threads_max) configuration
+\c "eviction=(threads_min)" and \c "eviction=(threads_max)" configuration
values can be used to configure the minimum and maximum number of
additional threads WiredTiger will create to keep up with the
application eviction load. Finally, if the WiredTiger eviction threads
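For reference, an editorial sketch (not part of this commit) of the eviction-thread configuration the tune-cache.dox hunk above refers to; the cache size and thread counts are illustrative values, not recommendations.

#include <wiredtiger.h>

int
open_with_eviction_threads(const char *home, WT_CONNECTION **connp)
{
    /*
     * Let WiredTiger scale between 4 and 8 eviction helper threads as the
     * application eviction load changes.
     */
    return (wiredtiger_open(home, NULL,
        "create,cache_size=10GB,eviction=(threads_min=4,threads_max=8)", connp));
}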
diff --git a/src/third_party/wiredtiger/src/docs/tune-durability.dox b/src/third_party/wiredtiger/src/docs/tune-durability.dox
index b8c62081362..615ca204ee9 100644
--- a/src/third_party/wiredtiger/src/docs/tune-durability.dox
+++ b/src/third_party/wiredtiger/src/docs/tune-durability.dox
@@ -16,7 +16,7 @@ WT_SESSION::commit_transaction returns, giving highest performance but
not ensuring durability. The durability guarantees can be stricter but
this will impact performance.
-If \c transaction_sync=(enabled=false) is configured to ::wiredtiger_open,
+If \c "transaction_sync=(enabled=false)" is configured in ::wiredtiger_open,
log records may be buffered in memory, and only flushed to disk by
checkpoints, when log files switch, or by calls to WT_SESSION::commit_transaction
with \c sync=on. (Note that any call to WT_SESSION::commit_transaction
@@ -24,10 +24,10 @@ with \c sync=on will flush the log records for all committed transactions,
not just the transaction where the configuration is set.) This provides the
minimal guarantees, but will be significantly faster than other configurations.
-If \c transaction_sync=(enabled=true), \c transaction_sync=(method)
+If \c "transaction_sync=(enabled=true)", \c "transaction_sync=(method)"
further configures the method used to flush log records to disk. By
default, the configured value is \c fsync, which calls the operating
-system's \c fsync call (of \c fdatasync if available) as each commit completes.
+system's \c fsync call (or \c fdatasync if available) as each commit completes.
If the value is set to \c dsync, the \c O_DSYNC or \c O_SYNC
flag to the operating system's \c open call will be specified when the
@@ -36,10 +36,10 @@ configurations are the same, and in our experience the \c open flags are
slower; this configuration is only included for systems where that may
not be the case.)
-If the value is set to \c none, the operating system's \c write call
-will be called as each commit completes but no explicit disk flush is made.
-This setting gives durability at the application level but not at
-the system level.
+If the value is set to \c none, the operating system's \c write call will
+be called as each commit completes but no explicit disk flush is made.
+This setting gives durability across application failure, but likely not
+across system failure (depending on operating system guarantees).
When a log file fills and the system moves to the next log file, the
previous log file will always be flushed to disk prior to close. So
@@ -74,7 +74,7 @@ the \c sync configuration that override the connection level settings.
If \c sync=on is configured then this commit operation will wait for its
log records, and all earlier ones, to be durable to the extent specified
-by the \c transaction_sync=(method) setting before returning.
+by the \c "transaction_sync=(method)" setting before returning.
If \c sync=off is configured then this commit operation will write its
records into the in-memory buffer and return immediately.
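As a sketch of the settings discussed in the tune-durability.dox hunks above (not part of this commit): the connection flushes log records as each commit completes, and individual commits override that with sync=off or sync=on. The transactions are left empty only to keep the example short.

#include <wiredtiger.h>

int
durability_tuning_example(const char *home)
{
    WT_CONNECTION *conn;
    WT_SESSION *session;
    int ret;

    /* Flush log records (fsync, or fdatasync where available) as each commit completes. */
    if ((ret = wiredtiger_open(home, NULL,
        "create,log=(enabled=true),transaction_sync=(enabled=true,method=fsync)", &conn)) != 0)
        return (ret);
    if ((ret = conn->open_session(conn, NULL, NULL, &session)) != 0)
        return (ret);

    /* This commit only buffers its log records, overriding the connection setting. */
    if ((ret = session->begin_transaction(session, NULL)) == 0)
        ret = session->commit_transaction(session, "sync=off");
    if (ret != 0)
        return (ret);

    /* This commit waits for its log records, and all earlier ones, to be durable. */
    if ((ret = session->begin_transaction(session, NULL)) == 0)
        ret = session->commit_transaction(session, "sync=on");
    if (ret != 0)
        return (ret);

    return (conn->close(conn, NULL));
}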
diff --git a/src/third_party/wiredtiger/src/docs/verbose-messaging.dox b/src/third_party/wiredtiger/src/docs/verbose-messaging.dox
index 0afc65bbc25..e939e3303b6 100644
--- a/src/third_party/wiredtiger/src/docs/verbose-messaging.dox
+++ b/src/third_party/wiredtiger/src/docs/verbose-messaging.dox
@@ -52,7 +52,7 @@ An example configuring a WiredTiger connection with verbosity enabled:
In the above example:
- \c api is configured to the \c DEBUG verbosity level.
- \c version is configured to the \c DEBUG verbosity level. If a category is passed without a verbosity
-level (.e.g \c <code>:0</code>), the category will default to the \c DEBUG level (i.e \c <code>verbose=[version]</code> is equivalent to <code>verbose=[version:1]</code>).
+level (e.g. \c <code>:0</code>), the category will default to the \c DEBUG level (i.e. <code>verbose=[version]</code> is equivalent to <code>verbose=[version:1]</code>).
- \c write is configured to the \c INFO verbosity level.
When configuring verbosity levels, the lowest value the user can associate with a category is \c 0 (\c INFO). This ensures
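For reference, an editorial sketch (not part of this commit) of the verbosity configuration described in the hunk above: api and version at the DEBUG level, write at the INFO level; the categories simply mirror the documentation's example.

#include <wiredtiger.h>

int
open_with_verbose(const char *home, WT_CONNECTION **connp)
{
    /*
     * api:1 and version (no level given, so it defaults to 1) are at the
     * DEBUG level; write:0 is at the INFO level.
     */
    return (wiredtiger_open(home, NULL,
        "create,verbose=[api:1,version,write:0]", connp));
}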
diff --git a/src/third_party/wiredtiger/src/include/wiredtiger.in b/src/third_party/wiredtiger/src/include/wiredtiger.in
index 9769df7661e..b402a564eb7 100644
--- a/src/third_party/wiredtiger/src/include/wiredtiger.in
+++ b/src/third_party/wiredtiger/src/include/wiredtiger.in
@@ -1895,8 +1895,8 @@ struct __wt_session {
* \c use_timestamp=true (the default), updates committed with a timestamp after the
* \c stable timestamp, in tables configured for checkpoint-level durability, are not
* included in the checkpoint. Updates committed in tables configured for commit-level
- * durability are always included in the checkpoint. See @ref checkpoint and @ref durability
- * for more information.
+ * durability are always included in the checkpoint. See @ref durability_checkpoint and
+ * @ref durability_log for more information.
*
* Calling the checkpoint method multiple times serializes the checkpoints, new checkpoint
* calls wait for running checkpoint calls to complete.
@@ -1925,9 +1925,9 @@ struct __wt_session {
* objects separately from a database-wide checkpoint can lead to data inconsistencies\, see
* @ref checkpoint_target for more information., a list of strings; default empty.}
* @config{use_timestamp, if true (the default)\, create the checkpoint as of the last
- * stable timestamp if timestamps are in use\, or all current updates if there is no stable
- * timestamp set. If false\, this option generates a checkpoint with all updates including
- * those later than the timestamp., a boolean flag; default \c true.}
+ * stable timestamp if timestamps are in use\, or with all committed updates if there is no
+ * stable timestamp set. If false\, always generate a checkpoint with all committed
+ * updates\, ignoring any stable timestamp., a boolean flag; default \c true.}
* @configend
* @errors
*/
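As a hedged illustration of the use_timestamp behavior documented in the hunk above (not part of this commit), both helpers below assume an already-open WT_SESSION:

#include <wiredtiger.h>

/* Checkpoint as of the last stable timestamp, the default behavior. */
int
checkpoint_stable(WT_SESSION *session)
{
    return (session->checkpoint(session, NULL));
}

/* Checkpoint all committed updates, ignoring any stable timestamp. */
int
checkpoint_all_committed(WT_SESSION *session)
{
    return (session->checkpoint(session, "use_timestamp=false"));
}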
@@ -2523,7 +2523,7 @@ struct __wt_connection {
* value to older than the current is silently ignored. The value must not be newer than
* the current stable timestamp. See @ref timestamp_global_api., a string; default empty.}
* @config{stable_timestamp, checkpoints will not include commits that are newer than the
- * specified timestamp in tables configured with \c log=(enabled=false). Values must be
+ * specified timestamp in tables configured with \c "log=(enabled=false)". Values must be
* monotonically increasing\, any attempt to set the value to older than the current is
* silently ignored. The value must not be older than the current oldest timestamp. See
* @ref timestamp_global_api., a string; default empty.}
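Finally, an editorial sketch (not part of this commit) of setting the global stable timestamp that the stable_timestamp documentation above refers to; the hexadecimal value 2a is illustrative only.

#include <wiredtiger.h>

/*
 * Advance the global stable timestamp: checkpoints of tables configured with
 * "log=(enabled=false)" will not include commits newer than this timestamp.
 * Values passed across calls must be monotonically increasing.
 */
int
advance_stable_timestamp(WT_CONNECTION *conn)
{
    return (conn->set_timestamp(conn, "stable_timestamp=2a"));
}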