author    Matt Kangas <matt.kangas@mongodb.com>  2014-12-12 10:56:37 -0500
committer Matt Kangas <matt.kangas@mongodb.com>  2014-12-12 10:56:37 -0500
commit    d3877ceca348713dc4d4347249a3976ee466edd3 (patch)
tree      a3ebc6344fc7fc8332f4da12057208660bd96d9d /src/third_party/wiredtiger/dist
parent    6b6471ea34f384e747eeef83dfdfcaee1b332bd0 (diff)
download  mongo-d3877ceca348713dc4d4347249a3976ee466edd3.tar.gz

    Import wiredtiger-wiredtiger-2.8-rc2-171-g378f727.tar.gz from wiredtiger branch mongodb-2.8
Diffstat (limited to 'src/third_party/wiredtiger/dist')
-rw-r--r--  src/third_party/wiredtiger/dist/api_config.py  |    2
-rw-r--r--  src/third_party/wiredtiger/dist/api_data.py    | 1463
-rw-r--r--  src/third_party/wiredtiger/dist/api_err.py     |  161
-rw-r--r--  src/third_party/wiredtiger/dist/db.py          |   24
-rw-r--r--  src/third_party/wiredtiger/dist/dist.py        |   46
-rw-r--r--  src/third_party/wiredtiger/dist/filelist       |   11
-rw-r--r--  src/third_party/wiredtiger/dist/filelist.win   |   11
-rw-r--r--  src/third_party/wiredtiger/dist/flags.py       |  296
-rw-r--r--  src/third_party/wiredtiger/dist/java_doc.py    |   19
-rw-r--r--  src/third_party/wiredtiger/dist/log.py         |  315
-rw-r--r--  src/third_party/wiredtiger/dist/s_string.ok    |    2
-rwxr-xr-x  src/third_party/wiredtiger/dist/s_style        |   14
-rw-r--r--  src/third_party/wiredtiger/dist/serial.py      |  192
-rw-r--r--  src/third_party/wiredtiger/dist/stat.py        |  170
-rw-r--r--  src/third_party/wiredtiger/dist/stat_data.py   |   46
15 files changed, 1445 insertions(+), 1327 deletions(-)
diff --git a/src/third_party/wiredtiger/dist/api_config.py b/src/third_party/wiredtiger/dist/api_config.py
index 6ca0275f228..8012a8a6be7 100644
--- a/src/third_party/wiredtiger/dist/api_config.py
+++ b/src/third_party/wiredtiger/dist/api_config.py
@@ -155,7 +155,7 @@ compare_srcfile(tmp_file, f)
f='../src/config/config_def.c'
tfile = open(tmp_file, 'w')
-tfile.write('''/* DO NOT EDIT: automatically built by dist/config.py. */
+tfile.write('''/* DO NOT EDIT: automatically built by dist/api_config.py. */
#include "wt_internal.h"
''')
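
For context: api_config.py follows the generate-then-compare pattern used throughout dist/, writing output to a temporary file and replacing the target only when the contents changed. A minimal sketch of that pattern, assuming a compare_srcfile helper along the lines of the one in dist/dist.py (the real helper may differ in detail):

    # Sketch of the generate-then-compare pattern used by the dist scripts;
    # compare_srcfile here is a stand-in for the helper in dist/dist.py.
    import filecmp, os, shutil

    def compare_srcfile(tmp, f):
        # Replace f only when the generated contents actually changed.
        if not os.path.isfile(f) or not filecmp.cmp(tmp, f, shallow=False):
            shutil.copyfile(tmp, f)
        os.remove(tmp)

    tmp_file = '__tmp'
    f = '../src/config/config_def.c'
    tfile = open(tmp_file, 'w')
    tfile.write('/* DO NOT EDIT: automatically built by dist/api_config.py. */\n')
    tfile.write('#include "wt_internal.h"\n')
    tfile.close()
    compare_srcfile(tmp_file, f)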
diff --git a/src/third_party/wiredtiger/dist/api_data.py b/src/third_party/wiredtiger/dist/api_data.py
index 5203b591dae..42ec64ff344 100644
--- a/src/third_party/wiredtiger/dist/api_data.py
+++ b/src/third_party/wiredtiger/dist/api_data.py
@@ -1,383 +1,386 @@
# This file is a python script that describes the WiredTiger API.
class Error:
- def __init__(self, name, desc, long_desc=None, **flags):
- self.name = name
- self.desc = desc
- self.long_desc = long_desc
- self.flags = flags
+ def __init__(self, name, desc, long_desc=None, **flags):
+ self.name = name
+ self.desc = desc
+ self.long_desc = long_desc
+ self.flags = flags
errors = [
- Error('WT_DUPLICATE_KEY', 'attempt to insert an existing key', '''
- This error is generated when the application attempts to insert
- a record with the same key as an existing record without the
- 'overwrite' configuration to WT_SESSION::open_cursor.'''),
- Error('WT_ERROR', 'non-specific WiredTiger error', '''
- This error is returned when an error is not covered by a
- specific error return.'''),
- Error('WT_NOTFOUND', 'item not found', '''
- This error indicates an operation did not find a value to
- return. This includes cursor search and other operations
- where no record matched the cursor's search key such as
- WT_CURSOR::update or WT_CURSOR::remove.'''),
- Error('WT_PANIC', 'WiredTiger library panic', '''
- This error indicates an underlying problem that requires the
- application exit and restart.'''),
- Error('WT_RESTART', 'restart the operation (internal)', undoc=True),
- Error('WT_ROLLBACK', 'conflict between concurrent operations', '''
- This error is generated when an operation cannot be completed
- due to a conflict with concurrent operations. The operation
- may be retried; if a transaction is in progress, it should be
- rolled back and the operation retried in a new transaction.'''),
+ Error('WT_DUPLICATE_KEY', 'attempt to insert an existing key', '''
+ This error is generated when the application attempts to insert
+ a record with the same key as an existing record without the
+ 'overwrite' configuration to WT_SESSION::open_cursor.'''),
+ Error('WT_ERROR', 'non-specific WiredTiger error', '''
+ This error is returned when an error is not covered by a
+ specific error return.'''),
+ Error('WT_NOTFOUND', 'item not found', '''
+ This error indicates an operation did not find a value to
+ return. This includes cursor search and other operations
+ where no record matched the cursor's search key such as
+ WT_CURSOR::update or WT_CURSOR::remove.'''),
+ Error('WT_PANIC', 'WiredTiger library panic', '''
+ This error indicates an underlying problem that requires the
+ application exit and restart.'''),
+ Error('WT_RESTART', 'restart the operation (internal)', undoc=True),
+ Error('WT_ROLLBACK', 'conflict between concurrent operations', '''
+ This error is generated when an operation cannot be completed
+ due to a conflict with concurrent operations. The operation
+ may be retried; if a transaction is in progress, it should be
+ rolled back and the operation retried in a new transaction.'''),
]
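
The WT_ROLLBACK text above prescribes the usual application-side pattern: roll back the transaction and retry the operation in a new one. A minimal sketch using the WiredTiger Python binding, assuming conflicts surface as WiredTigerError with WT_ROLLBACK in the message (an assumption about the binding, not something this file guarantees):

    import wiredtiger

    def with_retry(session, op, retries=5):
        # WT_ROLLBACK means: roll back, then retry in a new transaction.
        for _ in range(retries):
            session.begin_transaction()
            try:
                op(session)
                session.commit_transaction()
                return
            except wiredtiger.WiredTigerError as e:
                session.rollback_transaction()
                if 'WT_ROLLBACK' not in str(e):
                    raise
        raise RuntimeError('too many concurrent-operation conflicts')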
class Method:
- def __init__(self, config, **flags):
- self.config = config
- self.flags = flags
+ def __init__(self, config, **flags):
+ self.config = config
+ self.flags = flags
class Config:
- def __init__(self, name, default, desc, subconfig=None, **flags):
- self.name = name
- self.default = default
- self.desc = desc
- self.subconfig = subconfig
- self.flags = flags
+ def __init__(self, name, default, desc, subconfig=None, **flags):
+ self.name = name
+ self.default = default
+ self.desc = desc
+ self.subconfig = subconfig
+ self.flags = flags
- def __cmp__(self, other):
- return cmp(self.name, other.name)
+ def __cmp__(self, other):
+ return cmp(self.name, other.name)
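
Each Config below collects its keyword arguments (type, min, max, choices, undoc, ...) into self.flags via **flags. A made-up entry, shown only to illustrate the class:

    c = Config('example_option', '10', r'''
        a made-up option, shown only to illustrate the class''',
        min='0', max='100')
    # name and default are positional; min/max land in the flags dict.
    print('%s default=%s flags=%r' % (c.name, c.default, c.flags))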
# Metadata shared by all schema objects
common_meta = [
- Config('app_metadata', '', r'''
- application-owned metadata for this object'''),
- Config('columns', '', r'''
- list of the column names. Comma-separated list of the form
- <code>(column[,...])</code>. For tables, the number of entries
- must match the total number of values in \c key_format and \c
- value_format. For colgroups and indices, all column names must
- appear in the list of columns for the table''',
- type='list'),
+ Config('app_metadata', '', r'''
+ application-owned metadata for this object'''),
+ Config('columns', '', r'''
+ list of the column names. Comma-separated list of the form
+ <code>(column[,...])</code>. For tables, the number of entries
+ must match the total number of values in \c key_format and \c
+ value_format. For colgroups and indices, all column names must
+ appear in the list of columns for the table''',
+ type='list'),
]
source_meta = [
- Config('source', '', r'''
- set a custom data source URI for a column group, index or simple
- table. By default, the data source URI is derived from the \c
- type and the column group or index name. Applications can
- create tables from existing data sources by supplying a \c
- source configuration''', undoc=True),
- Config('type', 'file', r'''
- set the type of data source used to store a column group, index
- or simple table. By default, a \c "file:" URI is derived from
- the object name. The \c type configuration can be used to
- switch to a different data source, such as LSM or an extension
- configured by the application'''),
+ Config('source', '', r'''
+ set a custom data source URI for a column group, index or simple
+ table. By default, the data source URI is derived from the \c
+ type and the column group or index name. Applications can
+ create tables from existing data sources by supplying a \c
+ source configuration''', undoc=True),
+ Config('type', 'file', r'''
+ set the type of data source used to store a column group, index
+ or simple table. By default, a \c "file:" URI is derived from
+ the object name. The \c type configuration can be used to
+ switch to a different data source, such as LSM or an extension
+ configured by the application'''),
]
format_meta = common_meta + [
- Config('key_format', 'u', r'''
- the format of the data packed into key items. See @ref
- schema_format_types for details. By default, the key_format is
- \c 'u' and applications use WT_ITEM structures to manipulate
- raw byte arrays. By default, records are stored in row-store
- files: keys of type \c 'r' are record numbers and records
- referenced by record number are stored in column-store files''',
- type='format'),
- Config('value_format', 'u', r'''
- the format of the data packed into value items. See @ref
- schema_format_types for details. By default, the value_format
- is \c 'u' and applications use a WT_ITEM structure to
- manipulate raw byte arrays. Value items of type 't' are
- bitfields, and when configured with record number type keys,
- will be stored using a fixed-length store''',
- type='format'),
+ Config('key_format', 'u', r'''
+ the format of the data packed into key items. See @ref
+ schema_format_types for details. By default, the key_format is
+ \c 'u' and applications use WT_ITEM structures to manipulate
+ raw byte arrays. By default, records are stored in row-store
+ files: keys of type \c 'r' are record numbers and records
+ referenced by record number are stored in column-store files''',
+ type='format'),
+ Config('value_format', 'u', r'''
+ the format of the data packed into value items. See @ref
+ schema_format_types for details. By default, the value_format
+ is \c 'u' and applications use a WT_ITEM structure to
+ manipulate raw byte arrays. Value items of type 't' are
+ bitfields, and when configured with record number type keys,
+ will be stored using a fixed-length store''',
+ type='format'),
]
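
A sketch of key_format/value_format in use via the Python binding; the table name and home directory are illustrative, and the home directory must already exist:

    import wiredtiger

    conn = wiredtiger.wiredtiger_open('WT_HOME', 'create')
    session = conn.open_session()
    # String keys and values instead of the default raw-byte format 'u'.
    session.create('table:example', 'key_format=S,value_format=S')
    cursor = session.open_cursor('table:example', None)
    cursor.set_key('hello')
    cursor.set_value('world')
    cursor.insert()
    cursor.close()
    conn.close()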
lsm_config = [
- Config('lsm', '', r'''
- options only relevant for LSM data sources''',
- type='category', subconfig=[
- Config('auto_throttle', 'true', r'''
- Throttle inserts into LSM trees if flushing to disk isn't
- keeping up''',
- type='boolean'),
- Config('bloom', 'true', r'''
- create bloom filters on LSM tree chunks as they are merged''',
- type='boolean'),
- Config('bloom_config', '', r'''
- config string used when creating Bloom filter files, passed
- to WT_SESSION::create'''),
- Config('bloom_bit_count', '16', r'''
- the number of bits used per item for LSM bloom filters''',
- min='2', max='1000'),
- Config('bloom_hash_count', '8', r'''
- the number of hash values per item used for LSM bloom
- filters''',
- min='2', max='100'),
- Config('bloom_oldest', 'false', r'''
- create a bloom filter on the oldest LSM tree chunk. Only
- supported if bloom filters are enabled''',
- type='boolean'),
- Config('chunk_max', '5GB', r'''
- the maximum size a single chunk can be. Chunks larger than this
- size are not considered for further merges. This is a soft
- limit, and chunks larger than this value can be created. Must
- be larger than chunk_size''',
- min='100MB', max='10TB'),
- Config('chunk_size', '10MB', r'''
- the maximum size of the in-memory chunk of an LSM tree. This
- limit is soft - it is possible for chunks to be temporarily
- larger than this value. This overrides the \c memory_page_max
- setting''',
- min='512K', max='500MB'),
- Config('merge_max', '15', r'''
- the maximum number of chunks to include in a merge operation''',
- min='2', max='100'),
- Config('merge_min', '0', r'''
- the minimum number of chunks to include in a merge operation. If
- set to 0 or 1, half the value of merge_max is used''',
- max='100'),
- ]),
+ Config('lsm', '', r'''
+ options only relevant for LSM data sources''',
+ type='category', subconfig=[
+ Config('auto_throttle', 'true', r'''
+ Throttle inserts into LSM trees if flushing to disk isn't
+ keeping up''',
+ type='boolean'),
+ Config('bloom', 'true', r'''
+ create bloom filters on LSM tree chunks as they are merged''',
+ type='boolean'),
+ Config('bloom_config', '', r'''
+ config string used when creating Bloom filter files, passed
+ to WT_SESSION::create'''),
+ Config('bloom_bit_count', '16', r'''
+ the number of bits used per item for LSM bloom filters''',
+ min='2', max='1000'),
+ Config('bloom_hash_count', '8', r'''
+ the number of hash values per item used for LSM bloom
+ filters''',
+ min='2', max='100'),
+ Config('bloom_oldest', 'false', r'''
+ create a bloom filter on the oldest LSM tree chunk. Only
+ supported if bloom filters are enabled''',
+ type='boolean'),
+ Config('chunk_max', '5GB', r'''
+ the maximum size a single chunk can be. Chunks larger than this
+ size are not considered for further merges. This is a soft
+ limit, and chunks larger than this value can be created. Must
+ be larger than chunk_size''',
+ min='100MB', max='10TB'),
+ Config('chunk_size', '10MB', r'''
+ the maximum size of the in-memory chunk of an LSM tree. This
+ limit is soft - it is possible for chunks to be temporarily
+ larger than this value. This overrides the \c memory_page_max
+ setting''',
+ min='512K', max='500MB'),
+ Config('merge_max', '15', r'''
+ the maximum number of chunks to include in a merge operation''',
+ min='2', max='100'),
+ Config('merge_min', '0', r'''
+ the minimum number of chunks to include in a merge operation. If
+ set to 0 or 1, half the value of merge_max is used''',
+ max='100'),
+ ]),
]
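
Reusing the session from the sketch above: the lsm category maps onto a parenthesized group in the configuration string, mirroring the category/subconfig structure. Sizes here are illustrative:

    # Nested categories are spelled as parenthesized groups.
    session.create('lsm:example',
        'key_format=S,value_format=S,'
        'lsm=(chunk_size=20MB,bloom_bit_count=16,merge_max=15)')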
# Per-file configuration
file_config = format_meta + [
- Config('block_allocation', 'best', r'''
- configure block allocation. Permitted values are \c "first" or
- \c "best"; the \c "first" configuration uses a first-available
- algorithm during block allocation, the \c "best" configuration
- uses a best-fit algorithm''',
- choices=['first', 'best',]),
- Config('allocation_size', '4KB', r'''
- the file unit allocation size, in bytes, must be a power-of-two;
- smaller values decrease the file space required by overflow
- items, and the default value of 4KB is a good choice absent
- requirements from the operating system or storage device''',
- min='512B', max='128MB'),
- Config('block_compressor', '', r'''
- configure a compressor for file blocks. Permitted values are
- empty (off) or \c "bzip2", \c "snappy" or custom compression
- engine \c "name" created with WT_CONNECTION::add_compressor.
- See @ref compression for more information'''),
- Config('cache_resident', 'false', r'''
- do not ever evict the object's pages; see @ref
- tuning_cache_resident for more information''',
- type='boolean'),
- Config('checksum', 'uncompressed', r'''
- configure block checksums; permitted values are <code>on</code>
- (checksum all blocks), <code>off</code> (checksum no blocks) and
- <code>uncompressed</code> (checksum only blocks which are not
- compressed for any reason). The \c uncompressed setting is for
- applications which can rely on decompression to fail if a block
- has been corrupted''',
- choices=['on', 'off', 'uncompressed']),
- Config('collator', '', r'''
- configure custom collation for keys. Value must be a collator
- name created with WT_CONNECTION::add_collator'''),
- Config('dictionary', '0', r'''
- the maximum number of unique values remembered in the Btree
- row-store leaf page value dictionary; see
- @ref file_formats_compression for more information''',
- min='0'),
- Config('format', 'btree', r'''
- the file format''',
- choices=['btree']),
- Config('huffman_key', '', r'''
- configure Huffman encoding for keys. Permitted values
- are empty (off), \c "english", \c "utf8<file>" or \c
- "utf16<file>". See @ref huffman for more information'''),
- Config('huffman_value', '', r'''
- configure Huffman encoding for values. Permitted values
- are empty (off), \c "english", \c "utf8<file>" or \c
- "utf16<file>". See @ref huffman for more information'''),
- Config('internal_key_truncate', 'true', r'''
- configure internal key truncation, discarding unnecessary
- trailing bytes on internal keys (ignored for custom
- collators)''',
- type='boolean'),
- Config('internal_page_max', '4KB', r'''
- the maximum page size for internal nodes, in bytes; the size
- must be a multiple of the allocation size and is significant
- for applications wanting to avoid excessive L2 cache misses
- while searching the tree. The page maximum is the bytes of
- uncompressed data, that is, the limit is applied before any
- block compression is done''',
- min='512B', max='512MB'),
- Config('internal_item_max', '0', r'''
- the largest key stored within an internal node, in bytes. If
- non-zero, any key larger than the specified size will be
- stored as an overflow item (which may require additional I/O
- to access). If zero, a default size is chosen that permits at
- least 8 keys per internal page''',
- min=0),
- Config('key_gap', '10', r'''
- the maximum gap between instantiated keys in a Btree leaf page,
- constraining the number of keys processed to instantiate a
- random Btree leaf page key''',
- min='0', undoc=True),
- Config('leaf_page_max', '32KB', r'''
- the maximum page size for leaf nodes, in bytes; the size must
- be a multiple of the allocation size, and is significant for
- applications wanting to maximize sequential data transfer from
- a storage device. The page maximum is the bytes of uncompressed
- data, that is, the limit is applied before any block compression
- is done''',
- min='512B', max='512MB'),
- Config('leaf_item_max', '0', r'''
- the largest key or value stored within a leaf node, in bytes.
- If non-zero, any key or value larger than the specified size
- will be stored as an overflow item (which may require additional
- I/O to access). If zero, a default size is chosen that permits
- at least 4 key and value pairs per leaf page''',
- min=0),
- Config('memory_page_max', '5MB', r'''
- the maximum size a page can grow to in memory before being
- reconciled to disk. The specified size will be adjusted to a lower
- bound of <code>50 * leaf_page_max</code>, and an upper bound of
- <code>cache_size / 2</code>. This limit is soft - it is possible
- for pages to be temporarily larger than this value. This setting
- is ignored for LSM trees, see \c chunk_size''',
- min='512B', max='10TB'),
- Config('os_cache_max', '0', r'''
- maximum system buffer cache usage, in bytes. If non-zero, evict
- object blocks from the system buffer cache after that many bytes
- from this object are read or written into the buffer cache''',
- min=0),
- Config('os_cache_dirty_max', '0', r'''
- maximum dirty system buffer cache usage, in bytes. If non-zero,
- schedule writes for dirty blocks belonging to this object in the
- system buffer cache after that many bytes from this object are
- written into the buffer cache''',
- min=0),
- Config('prefix_compression', 'false', r'''
- configure prefix compression on row-store leaf pages''',
- type='boolean'),
- Config('prefix_compression_min', '4', r'''
- minimum gain before prefix compression will be used on row-store
- leaf pages''',
- min=0),
- Config('split_pct', '75', r'''
- the Btree page split size as a percentage of the maximum Btree
- page size, that is, when a Btree page is split, it will be
- split into smaller pages, where each page is the specified
- percentage of the maximum Btree page size''',
- min='25', max='100'),
+ Config('block_allocation', 'best', r'''
+ configure block allocation. Permitted values are \c "first" or
+ \c "best"; the \c "first" configuration uses a first-available
+ algorithm during block allocation, the \c "best" configuration
+ uses a best-fit algorithm''',
+ choices=['first', 'best',]),
+ Config('allocation_size', '4KB', r'''
+ the file unit allocation size, in bytes, must be a power-of-two;
+ smaller values decrease the file space required by overflow
+ items, and the default value of 4KB is a good choice absent
+ requirements from the operating system or storage device''',
+ min='512B', max='128MB'),
+ Config('block_compressor', '', r'''
+ configure a compressor for file blocks. Permitted values are
+ \c "none" or custom compression engine name created with
+ WT_CONNECTION::add_compressor. If WiredTiger has builtin support
+ for \c "snappy" or \c "zlib" compression, these names are also
+ available. See @ref compression for more information'''),
+ Config('cache_resident', 'false', r'''
+ do not ever evict the object's pages; see @ref
+ tuning_cache_resident for more information''',
+ type='boolean'),
+ Config('checksum', 'uncompressed', r'''
+ configure block checksums; permitted values are <code>on</code>
+ (checksum all blocks), <code>off</code> (checksum no blocks) and
+ <code>uncompressed</code> (checksum only blocks which are not
+ compressed for any reason). The \c uncompressed setting is for
+ applications which can rely on decompression to fail if a block
+ has been corrupted''',
+ choices=['on', 'off', 'uncompressed']),
+ Config('collator', '', r'''
+ configure custom collation for keys. Permitted values are
+ \c "none" or a custom collator name created with
+ WT_CONNECTION::add_collator'''),
+ Config('dictionary', '0', r'''
+ the maximum number of unique values remembered in the Btree
+ row-store leaf page value dictionary; see
+ @ref file_formats_compression for more information''',
+ min='0'),
+ Config('format', 'btree', r'''
+ the file format''',
+ choices=['btree']),
+ Config('huffman_key', '', r'''
+ configure Huffman encoding for keys. Permitted values are
+ \c "none", \c "english", \c "utf8<file>" or \c "utf16<file>".
+ See @ref huffman for more information'''),
+ Config('huffman_value', '', r'''
+ configure Huffman encoding for values. Permitted values are
+ \c "none", \c "english", \c "utf8<file>" or \c "utf16<file>".
+ See @ref huffman for more information'''),
+ Config('internal_key_truncate', 'true', r'''
+ configure internal key truncation, discarding unnecessary
+ trailing bytes on internal keys (ignored for custom
+ collators)''',
+ type='boolean'),
+ Config('internal_page_max', '4KB', r'''
+ the maximum page size for internal nodes, in bytes; the size
+ must be a multiple of the allocation size and is significant
+ for applications wanting to avoid excessive L2 cache misses
+ while searching the tree. The page maximum is the bytes of
+ uncompressed data, that is, the limit is applied before any
+ block compression is done''',
+ min='512B', max='512MB'),
+ Config('internal_item_max', '0', r'''
+ the largest key stored within an internal node, in bytes. If
+ non-zero, any key larger than the specified size will be
+ stored as an overflow item (which may require additional I/O
+ to access). If zero, a default size is chosen that permits at
+ least 8 keys per internal page''',
+ min=0),
+ Config('key_gap', '10', r'''
+ the maximum gap between instantiated keys in a Btree leaf page,
+ constraining the number of keys processed to instantiate a
+ random Btree leaf page key''',
+ min='0', undoc=True),
+ Config('leaf_page_max', '32KB', r'''
+ the maximum page size for leaf nodes, in bytes; the size must
+ be a multiple of the allocation size, and is significant for
+ applications wanting to maximize sequential data transfer from
+ a storage device. The page maximum is the bytes of uncompressed
+ data, that is, the limit is applied before any block compression
+ is done''',
+ min='512B', max='512MB'),
+ Config('leaf_item_max', '0', r'''
+ the largest key or value stored within a leaf node, in bytes.
+ If non-zero, any key or value larger than the specified size
+ will be stored as an overflow item (which may require additional
+ I/O to access). If zero, a default size is chosen that permits
+ at least 4 key and value pairs per leaf page''',
+ min=0),
+ Config('memory_page_max', '5MB', r'''
+ the maximum size a page can grow to in memory before being
+ reconciled to disk. The specified size will be adjusted to a lower
+ bound of <code>50 * leaf_page_max</code>, and an upper bound of
+ <code>cache_size / 2</code>. This limit is soft - it is possible
+ for pages to be temporarily larger than this value. This setting
+ is ignored for LSM trees, see \c chunk_size''',
+ min='512B', max='10TB'),
+ Config('os_cache_max', '0', r'''
+ maximum system buffer cache usage, in bytes. If non-zero, evict
+ object blocks from the system buffer cache after that many bytes
+ from this object are read or written into the buffer cache''',
+ min=0),
+ Config('os_cache_dirty_max', '0', r'''
+ maximum dirty system buffer cache usage, in bytes. If non-zero,
+ schedule writes for dirty blocks belonging to this object in the
+ system buffer cache after that many bytes from this object are
+ written into the buffer cache''',
+ min=0),
+ Config('prefix_compression', 'false', r'''
+ configure prefix compression on row-store leaf pages''',
+ type='boolean'),
+ Config('prefix_compression_min', '4', r'''
+ minimum gain before prefix compression will be used on row-store
+ leaf pages''',
+ min=0),
+ Config('split_pct', '75', r'''
+ the Btree page split size as a percentage of the maximum Btree
+ page size, that is, when a Btree page is split, it will be
+ split into smaller pages, where each page is the specified
+ percentage of the maximum Btree page size''',
+ min='25', max='100'),
]
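
Likewise for the per-file knobs; a sketch, again reusing the session above, and assuming the library was built with builtin snappy support (see block_compressor):

    # Per-file tuning knobs from file_config; block_compressor=snappy is
    # only valid when the library has builtin snappy support.
    session.create('table:tuned',
        'key_format=S,value_format=S,'
        'allocation_size=4KB,leaf_page_max=32KB,internal_page_max=4KB,'
        'split_pct=75,block_compressor=snappy')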
# File metadata, including both configurable and non-configurable (internal)
file_meta = file_config + [
- Config('checkpoint', '', r'''
- the file checkpoint entries'''),
- Config('checkpoint_lsn', '', r'''
- LSN of the last checkpoint'''),
- Config('id', '', r'''
- the file's ID number'''),
- Config('version', '(major=0,minor=0)', r'''
- the file version'''),
+ Config('checkpoint', '', r'''
+ the file checkpoint entries'''),
+ Config('checkpoint_lsn', '', r'''
+ LSN of the last checkpoint'''),
+ Config('id', '', r'''
+ the file's ID number'''),
+ Config('version', '(major=0,minor=0)', r'''
+ the file version'''),
]
table_only_config = [
- Config('colgroups', '', r'''
- comma-separated list of names of column groups. Each column
- group is stored separately, keyed by the primary key of the
- table. If no column groups are specified, all columns are
- stored together in a single file. All value columns in the
- table must appear in at least one column group. Each column
- group must be created with a separate call to
- WT_SESSION::create''', type='list'),
+ Config('colgroups', '', r'''
+ comma-separated list of names of column groups. Each column
+ group is stored separately, keyed by the primary key of the
+ table. If no column groups are specified, all columns are
+ stored together in a single file. All value columns in the
+ table must appear in at least one column group. Each column
+ group must be created with a separate call to
+ WT_SESSION::create''', type='list'),
]
index_only_config = [
- Config('extractor', '', r'''
- configure custom extractor for indices. Value must be an extractor
- name created with WT_CONNECTION::add_extractor'''),
- Config('immutable', 'false', r'''
+ Config('extractor', '', r'''
+ configure custom extractor for indices. Permitted values are
+ \c "none" or an extractor name created with
+ WT_CONNECTION::add_extractor'''),
+ Config('immutable', 'false', r'''
configure the index to be immutable - that is, the index is not changed
- by any update to a record in the table''', type='boolean'),
+ by any update to a record in the table''', type='boolean'),
]
colgroup_meta = common_meta + source_meta
index_meta = format_meta + source_meta + index_only_config + [
- Config('index_key_columns', '', r'''
- number of public key columns''', type='int', undoc=True),
+ Config('index_key_columns', '', r'''
+ number of public key columns''', type='int', undoc=True),
]
table_meta = format_meta + table_only_config
# Connection runtime config, shared by conn.reconfigure and wiredtiger_open
connection_runtime_config = [
- Config('async', '', r'''
- asynchronous operations configuration options''',
- type='category', subconfig=[
- Config('enabled', 'false', r'''
- enable asynchronous operation''',
- type='boolean'),
- Config('ops_max', '1024', r'''
- maximum number of expected simultaneous asynchronous
+ Config('async', '', r'''
+ asynchronous operations configuration options''',
+ type='category', subconfig=[
+ Config('enabled', 'false', r'''
+ enable asynchronous operation''',
+ type='boolean'),
+ Config('ops_max', '1024', r'''
+ maximum number of expected simultaneous asynchronous
operations''', min='10', max='4096'),
- Config('threads', '2', r'''
- the number of worker threads to service asynchronous
+ Config('threads', '2', r'''
+ the number of worker threads to service asynchronous
requests''',
min='1', max='20'), # !!! Must match WT_ASYNC_MAX_WORKERS
]),
- Config('cache_size', '100MB', r'''
- maximum heap memory to allocate for the cache. A database should
- configure either a cache_size or a shared_cache, not both''',
- min='1MB', max='10TB'),
- Config('checkpoint', '', r'''
- periodically checkpoint the database''',
- type='category', subconfig=[
- Config('name', '"WiredTigerCheckpoint"', r'''
- the checkpoint name'''),
- Config('log_size', '0', r'''
- wait for this amount of log record bytes to be written to
+ Config('cache_size', '100MB', r'''
+ maximum heap memory to allocate for the cache. A database should
+ configure either a cache_size or a shared_cache, not both''',
+ min='1MB', max='10TB'),
+ Config('checkpoint', '', r'''
+ periodically checkpoint the database''',
+ type='category', subconfig=[
+ Config('name', '"WiredTigerCheckpoint"', r'''
+ the checkpoint name'''),
+ Config('log_size', '0', r'''
+ wait for this amount of log record bytes to be written to
the log between each checkpoint. A database can configure
both log_size and wait to set an upper bound for checkpoints;
setting this value above 0 configures periodic checkpoints''',
- min='0', max='2GB'),
- Config('wait', '0', r'''
- seconds to wait between each checkpoint; setting this value
- above 0 configures periodic checkpoints''',
- min='0', max='100000'),
- ]),
- Config('error_prefix', '', r'''
- prefix string for error messages'''),
- Config('eviction_dirty_target', '80', r'''
- continue evicting until the cache has less dirty memory than the
- value, as a percentage of the total cache size. Dirty pages will
- only be evicted if the cache is full enough to trigger eviction''',
- min=10, max=99),
- Config('eviction_target', '80', r'''
- continue evicting until the cache has less total memory than the
- value, as a percentage of the total cache size. Must be less than
- \c eviction_trigger''',
- min=10, max=99),
- Config('eviction_trigger', '95', r'''
- trigger eviction when the cache is using this much memory, as a
- percentage of the total cache size''', min=10, max=99),
- Config('lsm_manager', '', r'''
- configure database wide options for LSM tree management''',
- type='category', subconfig=[
- Config('worker_thread_max', '4', r'''
- Configure a set of threads to manage merging LSM trees in
- the database.''',
- min='3', max='20'), # !!! Must match WT_LSM_MAX_WORKERS
- Config('merge', 'true', r'''
- merge LSM chunks where possible''',
- type='boolean')
- ]),
- Config('lsm_merge', 'true', r'''
- merge LSM chunks where possible (deprecated)''',
- type='boolean', undoc=True),
- Config('eviction', '', r'''
- eviction configuration options.''',
- type='category', subconfig=[
+ min='0', max='2GB'),
+ Config('wait', '0', r'''
+ seconds to wait between each checkpoint; setting this value
+ above 0 configures periodic checkpoints''',
+ min='0', max='100000'),
+ ]),
+ Config('error_prefix', '', r'''
+ prefix string for error messages'''),
+ Config('eviction_dirty_target', '80', r'''
+ continue evicting until the cache has less dirty memory than the
+ value, as a percentage of the total cache size. Dirty pages will
+ only be evicted if the cache is full enough to trigger eviction''',
+ min=10, max=99),
+ Config('eviction_target', '80', r'''
+ continue evicting until the cache has less total memory than the
+ value, as a percentage of the total cache size. Must be less than
+ \c eviction_trigger''',
+ min=10, max=99),
+ Config('eviction_trigger', '95', r'''
+ trigger eviction when the cache is using this much memory, as a
+ percentage of the total cache size''', min=10, max=99),
+ Config('lsm_manager', '', r'''
+ configure database wide options for LSM tree management''',
+ type='category', subconfig=[
+ Config('worker_thread_max', '4', r'''
+ Configure a set of threads to manage merging LSM trees in
+ the database.''',
+ min='3', max='20'), # !!! Must match WT_LSM_MAX_WORKERS
+ Config('merge', 'true', r'''
+ merge LSM chunks where possible''',
+ type='boolean')
+ ]),
+ Config('lsm_merge', 'true', r'''
+ merge LSM chunks where possible (deprecated)''',
+ type='boolean', undoc=True),
+ Config('eviction', '', r'''
+ eviction configuration options.''',
+ type='category', subconfig=[
Config('threads_max', '1', r'''
- maximum number of threads WiredTiger will start to help evict
- pages from cache. The number of threads started will vary
- depending on the current eviction load''',
+ maximum number of threads WiredTiger will start to help evict
+ pages from cache. The number of threads started will vary
+ depending on the current eviction load''',
min=1, max=20),
Config('threads_min', '1', r'''
minimum number of threads WiredTiger will start to help evict
@@ -385,179 +388,187 @@ connection_runtime_config = [
vary depending on the current eviction load''',
min=1, max=20),
]),
- Config('shared_cache', '', r'''
- shared cache configuration options. A database should configure
- either a cache_size or a shared_cache, not both''',
- type='category', subconfig=[
- Config('chunk', '10MB', r'''
- the granularity that a shared cache is redistributed''',
- min='1MB', max='10TB'),
- Config('reserve', '0', r'''
- amount of cache this database is guaranteed to have
- available from the shared cache. This setting is per
- database. Defaults to the chunk size''', type='int'),
- Config('name', '', r'''
- name of a cache that is shared between databases'''),
- Config('size', '500MB', r'''
- maximum memory to allocate for the shared cache. Setting
- this will update the value if one is already set''',
- min='1MB', max='10TB')
- ]),
- Config('statistics', 'none', r'''
- Maintain database statistics, which may impact performance.
- Choosing "all" maintains all statistics regardless of cost,
- "fast" maintains a subset of statistics that are relatively
- inexpensive, "none" turns off all statistics. The "clear"
- configuration resets statistics after they are gathered,
- where appropriate (for example, a cache size statistic is
- not cleared, while the count of cursor insert operations will
- be cleared). When "clear" is configured for the database,
- gathered statistics are reset each time a statistics cursor
- is used to gather statistics, as well as each time statistics
- are logged using the \c statistics_log configuration. See
- @ref statistics for more information''',
- type='list', choices=['all', 'fast', 'none', 'clear']),
- Config('statistics_log', '', r'''
- log any statistics the database is configured to maintain,
- to a file. See @ref statistics for more information''',
- type='category', subconfig=[
- Config('on_close', 'false', r'''log statistics on database close''',
- type='boolean'),
- Config('path', '"WiredTigerStat.%d.%H"', r'''
- the pathname to a file into which the log records are written,
- may contain ISO C standard strftime conversion specifications.
- If the value is not an absolute path name, the file is created
- relative to the database home'''),
- Config('sources', '', r'''
- if non-empty, include statistics for the list of data source
- URIs, if they are open at the time of the statistics logging.
- The list may include URIs matching a single data source
- ("table:mytable"), or a URI matching all data sources of a
- particular type ("table:")''',
- type='list'),
- Config('timestamp', '"%b %d %H:%M:%S"', r'''
- a timestamp prepended to each log record, may contain strftime
- conversion specifications'''),
- Config('wait', '0', r'''
- seconds to wait between each write of the log records''',
- min='0', max='100000'),
- ]),
- Config('verbose', '', r'''
- enable messages for various events. Only available if WiredTiger
- is configured with --enable-verbose. Options are given as a
- list, such as <code>"verbose=[evictserver,read]"</code>''',
- type='list', choices=[
- 'api',
- 'block',
- 'checkpoint',
- 'compact',
- 'evict',
- 'evictserver',
- 'fileops',
- 'log',
- 'lsm',
- 'metadata',
- 'mutex',
- 'overflow',
- 'read',
- 'reconcile',
- 'recovery',
- 'salvage',
- 'shared_cache',
- 'split',
- 'temporary',
- 'transaction',
- 'verify',
- 'version',
- 'write']),
+ Config('shared_cache', '', r'''
+ shared cache configuration options. A database should configure
+ either a cache_size or a shared_cache, not both''',
+ type='category', subconfig=[
+ Config('chunk', '10MB', r'''
+ the granularity that a shared cache is redistributed''',
+ min='1MB', max='10TB'),
+ Config('reserve', '0', r'''
+ amount of cache this database is guaranteed to have
+ available from the shared cache. This setting is per
+ database. Defaults to the chunk size''', type='int'),
+ Config('name', '', r'''
+ name of a cache that is shared between databases'''),
+ Config('size', '500MB', r'''
+ maximum memory to allocate for the shared cache. Setting
+ this will update the value if one is already set''',
+ min='1MB', max='10TB')
+ ]),
+ Config('statistics', 'none', r'''
+ Maintain database statistics, which may impact performance.
+ Choosing "all" maintains all statistics regardless of cost,
+ "fast" maintains a subset of statistics that are relatively
+ inexpensive, "none" turns off all statistics. The "clear"
+ configuration resets statistics after they are gathered,
+ where appropriate (for example, a cache size statistic is
+ not cleared, while the count of cursor insert operations will
+ be cleared). When "clear" is configured for the database,
+ gathered statistics are reset each time a statistics cursor
+ is used to gather statistics, as well as each time statistics
+ are logged using the \c statistics_log configuration. See
+ @ref statistics for more information''',
+ type='list', choices=['all', 'fast', 'none', 'clear']),
+ Config('statistics_log', '', r'''
+ log any statistics the database is configured to maintain,
+ to a file. See @ref statistics for more information''',
+ type='category', subconfig=[
+ Config('on_close', 'false', r'''log statistics on database close''',
+ type='boolean'),
+ Config('path', '"WiredTigerStat.%d.%H"', r'''
+ the pathname to a file into which the log records are written,
+ may contain ISO C standard strftime conversion specifications.
+ If the value is not an absolute path name, the file is created
+ relative to the database home'''),
+ Config('sources', '', r'''
+ if non-empty, include statistics for the list of data source
+ URIs, if they are open at the time of the statistics logging.
+ The list may include URIs matching a single data source
+ ("table:mytable"), or a URI matching all data sources of a
+ particular type ("table:")''',
+ type='list'),
+ Config('timestamp', '"%b %d %H:%M:%S"', r'''
+ a timestamp prepended to each log record, may contain strftime
+ conversion specifications'''),
+ Config('wait', '0', r'''
+ seconds to wait between each write of the log records''',
+ min='0', max='100000'),
+ ]),
+ Config('verbose', '', r'''
+ enable messages for various events. Only available if WiredTiger
+ is configured with --enable-verbose. Options are given as a
+ list, such as <code>"verbose=[evictserver,read]"</code>''',
+ type='list', choices=[
+ 'api',
+ 'block',
+ 'checkpoint',
+ 'compact',
+ 'evict',
+ 'evictserver',
+ 'fileops',
+ 'log',
+ 'lsm',
+ 'metadata',
+ 'mutex',
+ 'overflow',
+ 'read',
+ 'reconcile',
+ 'recovery',
+ 'salvage',
+ 'shared_cache',
+ 'split',
+ 'temporary',
+ 'transaction',
+ 'verify',
+ 'version',
+ 'write']),
]
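
Because connection_runtime_config is shared by wiredtiger_open and WT_CONNECTION::reconfigure, the same strings work in both places. A sketch with illustrative sizes, assuming the Python binding exposes reconfigure on the connection as recent bindings do:

    import wiredtiger

    conn = wiredtiger.wiredtiger_open('WT_HOME',
        'create,cache_size=500MB,statistics=(fast),'
        'eviction_target=80,eviction_trigger=95')
    # The same options are accepted at runtime by reconfigure:
    conn.reconfigure('cache_size=1GB,statistics=(all,clear)')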
session_config = [
- Config('isolation', 'read-committed', r'''
- the default isolation level for operations in this session''',
- choices=['read-uncommitted', 'read-committed', 'snapshot']),
+ Config('isolation', 'read-committed', r'''
+ the default isolation level for operations in this session''',
+ choices=['read-uncommitted', 'read-committed', 'snapshot']),
]
common_wiredtiger_open = [
- Config('buffer_alignment', '-1', r'''
- in-memory alignment (in bytes) for buffers used for I/O. The
- default value of -1 indicates a platform-specific alignment
- value should be used (4KB on Linux systems, zero elsewhere)''',
- min='-1', max='1MB'),
- Config('checkpoint_sync', 'true', r'''
- flush files to stable storage when closing or writing
- checkpoints''',
- type='boolean'),
- Config('direct_io', '', r'''
- Use \c O_DIRECT to access files. Options are given as a list,
- such as <code>"direct_io=[data]"</code>. Configuring
- \c direct_io requires care, see @ref
- tuning_system_buffer_cache_direct_io for important warnings.
- Including \c "data" will cause WiredTiger data files to use
- \c O_DIRECT, including \c "log" will cause WiredTiger log files
- to use \c O_DIRECT, and including \c "checkpoint" will cause
- WiredTiger data files opened at a checkpoint (i.e., read-only) to
- use \c O_DIRECT''',
- type='list', choices=['checkpoint', 'data', 'log']),
- Config('extensions', '', r'''
- list of shared library extensions to load (using dlopen).
- Any values specified to a library extension are passed to
- WT_CONNECTION::load_extension as the \c config parameter
- (for example,
- <code>extensions=(/path/ext.so={entry=my_entry})</code>)''',
- type='list'),
- Config('file_extend', '', r'''
- file extension configuration. If set, extend files of the set
- type in allocations of the set size, instead of a block at a
- time as each new block is written. For example,
- <code>file_extend=(data=16MB)</code>''',
- type='list', choices=['data', 'log']),
- Config('hazard_max', '1000', r'''
- maximum number of simultaneous hazard pointers per session
- handle''',
- min='15'),
- Config('log', '', r'''
- enable logging''',
- type='category', subconfig=[
- Config('archive', 'true', r'''
- automatically archive unneeded log files''',
- type='boolean'),
- Config('enabled', 'false', r'''
- enable logging subsystem''',
- type='boolean'),
- Config('file_max', '100MB', r'''
- the maximum size of log files''',
- min='100KB', max='2GB'),
- Config('path', '""', r'''
- the path to a directory into which the log files are written.
- If the value is not an absolute path name, the files are created
- relative to the database home'''),
- ]),
- Config('mmap', 'true', r'''
- Use memory mapping to access files when possible''',
- type='boolean'),
- Config('multiprocess', 'false', r'''
- permit sharing between processes (will automatically start an
- RPC server for primary processes and use RPC for secondary
- processes). <b>Not yet supported in WiredTiger</b>''',
- type='boolean'),
- Config('session_max', '100', r'''
- maximum expected number of sessions (including server
- threads)''',
- min='1'),
- Config('transaction_sync', '', r'''
- how to sync log records when the transaction commits''',
- type='category', subconfig=[
- Config('enabled', 'false', r'''
- whether to sync the log on every commit by default, can
- be overridden by the \c sync setting to
- WT_SESSION::begin_transaction''',
- type='boolean'),
- Config('method', 'fsync', r'''
- the method used to ensure log records are stable on disk,
- see @ref tune_durability for more information''',
- choices=['dsync', 'fsync', 'none']),
- ]),
+ Config('buffer_alignment', '-1', r'''
+ in-memory alignment (in bytes) for buffers used for I/O. The
+ default value of -1 indicates a platform-specific alignment
+ value should be used (4KB on Linux systems, zero elsewhere)''',
+ min='-1', max='1MB'),
+ Config('checkpoint_sync', 'true', r'''
+ flush files to stable storage when closing or writing
+ checkpoints''',
+ type='boolean'),
+ Config('direct_io', '', r'''
+ Use \c O_DIRECT to access files. Options are given as a list,
+ such as <code>"direct_io=[data]"</code>. Configuring
+ \c direct_io requires care, see @ref
+ tuning_system_buffer_cache_direct_io for important warnings.
+ Including \c "data" will cause WiredTiger data files to use
+ \c O_DIRECT, including \c "log" will cause WiredTiger log files
+ to use \c O_DIRECT, and including \c "checkpoint" will cause
+ WiredTiger data files opened at a checkpoint (i.e., read-only) to
+ use \c O_DIRECT''',
+ type='list', choices=['checkpoint', 'data', 'log']),
+ Config('extensions', '', r'''
+ list of shared library extensions to load (using dlopen).
+ Any values specified to a library extension are passed to
+ WT_CONNECTION::load_extension as the \c config parameter
+ (for example,
+ <code>extensions=(/path/ext.so={entry=my_entry})</code>)''',
+ type='list'),
+ Config('file_extend', '', r'''
+ file extension configuration. If set, extend files of the set
+ type in allocations of the set size, instead of a block at a
+ time as each new block is written. For example,
+ <code>file_extend=(data=16MB)</code>''',
+ type='list', choices=['data', 'log']),
+ Config('hazard_max', '1000', r'''
+ maximum number of simultaneous hazard pointers per session
+ handle''',
+ min='15'),
+ Config('log', '', r'''
+ enable logging''',
+ type='category', subconfig=[
+ Config('archive', 'true', r'''
+ automatically archive unneeded log files''',
+ type='boolean'),
+ Config('compressor', '', r'''
+ configure a compressor for log records. Permitted values are
+ empty (off) or \c "bzip2", \c "snappy" or custom compression
+ engine \c "name" created with WT_CONNECTION::add_compressor.
+ See @ref compression for more information'''),
+ Config('enabled', 'false', r'''
+ enable logging subsystem''',
+ type='boolean'),
+ Config('file_max', '100MB', r'''
+ the maximum size of log files''',
+ min='100KB', max='2GB'),
+ Config('path', '""', r'''
+ the path to a directory into which the log files are written.
+ If the value is not an absolute path name, the files are created
+ relative to the database home'''),
+ Config('prealloc', 'true', r'''
+ pre-allocate log files.''',
+ type='boolean'),
+ ]),
+ Config('mmap', 'true', r'''
+ Use memory mapping to access files when possible''',
+ type='boolean'),
+ Config('multiprocess', 'false', r'''
+ permit sharing between processes (will automatically start an
+ RPC server for primary processes and use RPC for secondary
+ processes). <b>Not yet supported in WiredTiger</b>''',
+ type='boolean'),
+ Config('session_max', '100', r'''
+ maximum expected number of sessions (including server
+ threads)''',
+ min='1'),
+ Config('transaction_sync', '', r'''
+ how to sync log records when the transaction commits''',
+ type='category', subconfig=[
+ Config('enabled', 'false', r'''
+ whether to sync the log on every commit by default, can
+ be overridden by the \c sync setting to
+ WT_SESSION::begin_transaction''',
+ type='boolean'),
+ Config('method', 'fsync', r'''
+ the method used to ensure log records are stable on disk,
+ see @ref tune_durability for more information''',
+ choices=['dsync', 'fsync', 'none']),
+ ]),
]
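
A sketch exercising the new log.compressor and log.prealloc options at open time; log compression with snappy again assumes builtin support:

    conn = wiredtiger.wiredtiger_open('WT_HOME',
        'create,log=(enabled=true,archive=true,compressor=snappy,'
        'prealloc=true,file_max=100MB),'
        'transaction_sync=(enabled=false,method=fsync)')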
methods = {
@@ -574,186 +585,186 @@ methods = {
'session.close' : Method([]),
'session.compact' : Method([
- Config('timeout', '1200', r'''
- maximum amount of time to allow for compact in seconds. The
- actual amount of time spent in compact may exceed the configured
- value. A value of zero disables the timeout''',
- type='int'),
+ Config('timeout', '1200', r'''
+ maximum amount of time to allow for compact in seconds. The
+ actual amount of time spent in compact may exceed the configured
+ value. A value of zero disables the timeout''',
+ type='int'),
]),
'session.create' : Method(file_config + lsm_config + source_meta +
- index_only_config + table_only_config + [
- Config('exclusive', 'false', r'''
- fail if the object exists. When false (the default), if the
- object exists, check that its settings match the specified
- configuration''',
- type='boolean'),
+ index_only_config + table_only_config + [
+ Config('exclusive', 'false', r'''
+ fail if the object exists. When false (the default), if the
+ object exists, check that its settings match the specified
+ configuration''',
+ type='boolean'),
]),
'session.drop' : Method([
- Config('force', 'false', r'''
- return success if the object does not exist''',
- type='boolean'),
- Config('remove_files', 'true', r'''
- should the underlying files be removed?''',
- type='boolean'),
+ Config('force', 'false', r'''
+ return success if the object does not exist''',
+ type='boolean'),
+ Config('remove_files', 'true', r'''
+ should the underlying files be removed?''',
+ type='boolean'),
]),
'session.log_printf' : Method([]),
'session.open_cursor' : Method([
- Config('append', 'false', r'''
- append the value as a new record, creating a new record
- number key; valid only for cursors with record number keys''',
- type='boolean'),
- Config('bulk', 'false', r'''
- configure the cursor for bulk-loading, a fast, initial load
- path (see @ref tune_bulk_load for more information). Bulk-load
- may only be used for newly created objects and cursors
- configured for bulk-load only support the WT_CURSOR::insert
- and WT_CURSOR::close methods. When bulk-loading row-store
- objects, keys must be loaded in sorted order. The value is
- usually a true/false flag; when bulk-loading fixed-length
- column store objects, the special value \c bitmap allows
- chunks of a memory resident bitmap to be loaded directly into
- a file by passing a \c WT_ITEM to WT_CURSOR::set_value where
- the \c size field indicates the number of records in the
- bitmap (as specified by the object's \c value_format
- configuration). Bulk-loaded bitmap values must end on a byte
- boundary relative to the bit count (except for the last set
- of values loaded)'''),
- Config('checkpoint', '', r'''
- the name of a checkpoint to open (the reserved name
- "WiredTigerCheckpoint" opens the most recent internal
- checkpoint taken for the object). The cursor does not
- support data modification'''),
- Config('dump', '', r'''
- configure the cursor for dump format inputs and outputs: "hex"
- selects a simple hexadecimal format, "json" selects a JSON format
- with each record formatted as fields named by column names if
- available, and "print" selects a format where only non-printing
- characters are hexadecimal encoded. These formats are compatible
- with the @ref util_dump and @ref util_load commands''',
- choices=['hex', 'json', 'print']),
- Config('next_random', 'false', r'''
- configure the cursor to return a pseudo-random record from
- the object; valid only for row-store cursors. Cursors
- configured with \c next_random=true only support the
- WT_CURSOR::next and WT_CURSOR::close methods. See @ref
- cursor_random for details''',
- type='boolean'),
- Config('overwrite', 'true', r'''
- configures whether the cursor's insert, update and remove
- methods check the existing state of the record. If \c overwrite
- is \c false, WT_CURSOR::insert fails with ::WT_DUPLICATE_KEY
- if the record exists, WT_CURSOR::update and WT_CURSOR::remove
- fail with ::WT_NOTFOUND if the record does not exist''',
- type='boolean'),
- Config('raw', 'false', r'''
- ignore the encodings for the key and value, manage data as if
- the formats were \c "u". See @ref cursor_raw for details''',
- type='boolean'),
- Config('readonly', 'false', r'''
- only query operations are supported by this cursor. An error is
- returned if a modification is attempted using the cursor. The
- default is false for all cursor types except for log and metadata
- cursors''',
- type='boolean'),
- Config('skip_sort_check', 'false', r'''
- skip the check of the sort order of each bulk-loaded key''',
- type='boolean', undoc=True),
- Config('statistics', '', r'''
- Specify the statistics to be gathered. Choosing "all" gathers
- statistics regardless of cost and may include traversing
- on-disk files; "fast" gathers a subset of relatively
- inexpensive statistics. The selection must agree with the
- database \c statistics configuration specified to
- ::wiredtiger_open or WT_CONNECTION::reconfigure. For example,
- "all" or "fast" can be configured when the database is
- configured with "all", but the cursor open will fail if "all"
- is specified when the database is configured with "fast",
- and the cursor open will fail in all cases when the database
- is configured with "none". If \c statistics is not configured,
- the default configuration is the database configuration.
- The "clear" configuration resets statistics after gathering
- them, where appropriate (for example, a cache size statistic
- is not cleared, while the count of cursor insert operations
- will be cleared). See @ref statistics for more information''',
- type='list', choices=['all', 'fast', 'clear']),
- Config('target', '', r'''
- if non-empty, backup the list of objects; valid only for a
- backup data source''',
- type='list'),
+ Config('append', 'false', r'''
+ append the value as a new record, creating a new record
+ number key; valid only for cursors with record number keys''',
+ type='boolean'),
+ Config('bulk', 'false', r'''
+ configure the cursor for bulk-loading, a fast, initial load
+ path (see @ref tune_bulk_load for more information). Bulk-load
+ may only be used for newly created objects and cursors
+ configured for bulk-load only support the WT_CURSOR::insert
+ and WT_CURSOR::close methods. When bulk-loading row-store
+ objects, keys must be loaded in sorted order. The value is
+ usually a true/false flag; when bulk-loading fixed-length
+ column store objects, the special value \c bitmap allows
+ chunks of a memory resident bitmap to be loaded directly into
+ a file by passing a \c WT_ITEM to WT_CURSOR::set_value where
+ the \c size field indicates the number of records in the
+ bitmap (as specified by the object's \c value_format
+ configuration). Bulk-loaded bitmap values must end on a byte
+ boundary relative to the bit count (except for the last set
+ of values loaded)'''),
+ Config('checkpoint', '', r'''
+ the name of a checkpoint to open (the reserved name
+ "WiredTigerCheckpoint" opens the most recent internal
+ checkpoint taken for the object). The cursor does not
+ support data modification'''),
+ Config('dump', '', r'''
+ configure the cursor for dump format inputs and outputs: "hex"
+ selects a simple hexadecimal format, "json" selects a JSON format
+ with each record formatted as fields named by column names if
+ available, and "print" selects a format where only non-printing
+ characters are hexadecimal encoded. These formats are compatible
+ with the @ref util_dump and @ref util_load commands''',
+ choices=['hex', 'json', 'print']),
+ Config('next_random', 'false', r'''
+ configure the cursor to return a pseudo-random record from
+ the object; valid only for row-store cursors. Cursors
+ configured with \c next_random=true only support the
+ WT_CURSOR::next and WT_CURSOR::close methods. See @ref
+ cursor_random for details''',
+ type='boolean'),
+ Config('overwrite', 'true', r'''
+ configures whether the cursor's insert, update and remove
+ methods check the existing state of the record. If \c overwrite
+ is \c false, WT_CURSOR::insert fails with ::WT_DUPLICATE_KEY
+ if the record exists, WT_CURSOR::update and WT_CURSOR::remove
+ fail with ::WT_NOTFOUND if the record does not exist''',
+ type='boolean'),
+ Config('raw', 'false', r'''
+ ignore the encodings for the key and value, manage data as if
+ the formats were \c "u". See @ref cursor_raw for details''',
+ type='boolean'),
+ Config('readonly', 'false', r'''
+ only query operations are supported by this cursor. An error is
+ returned if a modification is attempted using the cursor. The
+ default is false for all cursor types except for log and metadata
+ cursors''',
+ type='boolean'),
+ Config('skip_sort_check', 'false', r'''
+ skip the check of the sort order of each bulk-loaded key''',
+ type='boolean', undoc=True),
+ Config('statistics', '', r'''
+ Specify the statistics to be gathered. Choosing "all" gathers
+ statistics regardless of cost and may include traversing
+ on-disk files; "fast" gathers a subset of relatively
+ inexpensive statistics. The selection must agree with the
+ database \c statistics configuration specified to
+ ::wiredtiger_open or WT_CONNECTION::reconfigure. For example,
+ "all" or "fast" can be configured when the database is
+ configured with "all", but the cursor open will fail if "all"
+ is specified when the database is configured with "fast",
+ and the cursor open will fail in all cases when the database
+ is configured with "none". If \c statistics is not configured,
+ the default configuration is the database configuration.
+ The "clear" configuration resets statistics after gathering
+ them, where appropriate (for example, a cache size statistic
+ is not cleared, while the count of cursor insert operations
+ will be cleared). See @ref statistics for more information''',
+ type='list', choices=['all', 'fast', 'clear']),
+ Config('target', '', r'''
+ if non-empty, backup the list of objects; valid only for a
+ backup data source''',
+ type='list'),
]),
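
[Editor's sketch, not part of the imported diff: how two of the WT_SESSION::open_cursor configurations above behave through the Python bindings. The 'wiredtiger' module is the standard SWIG wrapper; the home directory and table name are illustrative assumptions.]

import wiredtiger

# Enable statistics at the connection level so the statistics cursor
# below is permitted; per the documentation above, a statistics cursor
# open fails when the database is configured with "none".
conn = wiredtiger.wiredtiger_open('WT_HOME', 'create,statistics=(fast)')
session = conn.open_session()
session.create('table:demo', 'key_format=S,value_format=S')

# overwrite=false: inserting an existing key fails with WT_DUPLICATE_KEY
# instead of silently replacing the value.
cursor = session.open_cursor('table:demo', None, 'overwrite=false')
cursor.set_key('k1')
cursor.set_value('v1')
cursor.insert()
cursor.set_key('k1')
cursor.set_value('v2')
try:
    cursor.insert()
except wiredtiger.WiredTigerError as e:
    print('insert failed: ' + str(e))
cursor.close()

# A statistics cursor restricted to the inexpensive "fast" subset.
stats = session.open_cursor('statistics:', None, 'statistics=(fast)')
conn.close()
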
'session.rename' : Method([]),
'session.salvage' : Method([
- Config('force', 'false', r'''
- force salvage even of files that do not appear to be WiredTiger
- files''',
- type='boolean'),
+ Config('force', 'false', r'''
+ force salvage even of files that do not appear to be WiredTiger
+ files''',
+ type='boolean'),
]),
'session.truncate' : Method([]),
'session.upgrade' : Method([]),
'session.verify' : Method([
- Config('dump_address', 'false', r'''
- Display addresses and page types as pages are verified, using
- the application's message handler, intended for debugging''',
- type='boolean'),
- Config('dump_blocks', 'false', r'''
- Display the contents of on-disk blocks as they are verified, using
- the application's message handler, intended for debugging''',
- type='boolean'),
- Config('dump_offsets', '', r'''
- Display the contents of specific on-disk blocks, using
- the application's message handler, intended for debugging''',
- type='list'),
- Config('dump_pages', 'false', r'''
- Display the contents of in-memory pages as they are verified, using
- the application's message handler, intended for debugging''',
- type='boolean')
+ Config('dump_address', 'false', r'''
+ Display addresses and page types as pages are verified, using
+ the application's message handler, intended for debugging''',
+ type='boolean'),
+ Config('dump_blocks', 'false', r'''
+ Display the contents of on-disk blocks as they are verified, using
+ the application's message handler, intended for debugging''',
+ type='boolean'),
+ Config('dump_offsets', '', r'''
+ Display the contents of specific on-disk blocks, using
+ the application's message handler, intended for debugging''',
+ type='list'),
+ Config('dump_pages', 'false', r'''
+ Display the contents of in-memory pages as they are verified, using
+ the application's message handler, intended for debugging''',
+ type='boolean')
]),
'session.begin_transaction' : Method([
- Config('isolation', '', r'''
- the isolation level for this transaction; defaults to the
- session's isolation level''',
- choices=['read-uncommitted', 'read-committed', 'snapshot']),
- Config('name', '', r'''
- name of the transaction for tracing and debugging'''),
- Config('priority', 0, r'''
- priority of the transaction for resolving conflicts.
- Transactions with higher values are less likely to abort''',
- min='-100', max='100'),
- Config('sync', '', r'''
- whether to sync log records when the transaction commits,
- inherited from ::wiredtiger_open \c transaction_sync''',
- type='boolean'),
+ Config('isolation', '', r'''
+ the isolation level for this transaction; defaults to the
+ session's isolation level''',
+ choices=['read-uncommitted', 'read-committed', 'snapshot']),
+ Config('name', '', r'''
+ name of the transaction for tracing and debugging'''),
+ Config('priority', 0, r'''
+ priority of the transaction for resolving conflicts.
+ Transactions with higher values are less likely to abort''',
+ min='-100', max='100'),
+ Config('sync', '', r'''
+ whether to sync log records when the transaction commits,
+ inherited from ::wiredtiger_open \c transaction_sync''',
+ type='boolean'),
]),
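
[Editor's sketch, not part of the imported diff: the begin_transaction configurations above in use, continuing the conn/session/table from the open_cursor sketch earlier.]

session.begin_transaction('isolation=snapshot,name=demo-txn')
cursor = session.open_cursor('table:demo')
cursor.set_key('k2')
cursor.set_value('v2')
cursor.insert()
session.commit_transaction()      # or session.rollback_transaction()
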
'session.commit_transaction' : Method([]),
'session.rollback_transaction' : Method([]),
'session.checkpoint' : Method([
- Config('drop', '', r'''
- specify a list of checkpoints to drop.
- The list may additionally contain one of the following keys:
- \c "from=all" to drop all checkpoints,
- \c "from=<checkpoint>" to drop all checkpoints after and
- including the named checkpoint, or
- \c "to=<checkpoint>" to drop all checkpoints before and
- including the named checkpoint. Checkpoints cannot be
- dropped while a hot backup is in progress or if open in
- a cursor''', type='list'),
- Config('force', 'false', r'''
- by default, checkpoints may be skipped if the underlying object
- has not been modified, this option forces the checkpoint''',
- type='boolean'),
- Config('name', '', r'''
- if non-empty, specify a name for the checkpoint (note that
- checkpoints including LSM trees may not be named)'''),
- Config('target', '', r'''
- if non-empty, checkpoint the list of objects''', type='list'),
+ Config('drop', '', r'''
+ specify a list of checkpoints to drop.
+ The list may additionally contain one of the following keys:
+ \c "from=all" to drop all checkpoints,
+ \c "from=<checkpoint>" to drop all checkpoints after and
+ including the named checkpoint, or
+ \c "to=<checkpoint>" to drop all checkpoints before and
+ including the named checkpoint. Checkpoints cannot be
+ dropped while a hot backup is in progress or if open in
+ a cursor''', type='list'),
+ Config('force', 'false', r'''
+ by default, checkpoints may be skipped if the underlying object
+        has not been modified; this option forces the checkpoint''',
+ type='boolean'),
+ Config('name', '', r'''
+ if set, specify a name for the checkpoint (note that checkpoints
+ including LSM trees may not be named)'''),
+ Config('target', '', r'''
+ if non-empty, checkpoint the list of objects''', type='list'),
]),
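
[Editor's sketch, not part of the imported diff: named checkpoints and the drop syntax documented above, continuing the earlier session; the checkpoint name is illustrative.]

session.checkpoint('name=nightly')
session.checkpoint('force=true,name=nightly')
# Take an unnamed checkpoint while dropping 'nightly' and every
# checkpoint after it.
session.checkpoint('drop=(from=nightly)')
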
'connection.add_collator' : Method([]),
@@ -761,47 +772,47 @@ methods = {
'connection.add_data_source' : Method([]),
'connection.add_extractor' : Method([]),
'connection.async_new_op' : Method([
- Config('append', 'false', r'''
- append the value as a new record, creating a new record
- number key; valid only for operations with record number keys''',
- type='boolean'),
- Config('overwrite', 'true', r'''
- configures whether the cursor's insert, update and remove
- methods check the existing state of the record. If \c overwrite
- is \c false, WT_CURSOR::insert fails with ::WT_DUPLICATE_KEY
- if the record exists, WT_CURSOR::update and WT_CURSOR::remove
- fail with ::WT_NOTFOUND if the record does not exist''',
- type='boolean'),
- Config('raw', 'false', r'''
- ignore the encodings for the key and value, manage data as if
- the formats were \c "u". See @ref cursor_raw for details''',
- type='boolean'),
- Config('timeout', '1200', r'''
- maximum amount of time to allow for compact in seconds. The
- actual amount of time spent in compact may exceed the configured
- value. A value of zero disables the timeout''',
- type='int'),
+ Config('append', 'false', r'''
+ append the value as a new record, creating a new record
+ number key; valid only for operations with record number keys''',
+ type='boolean'),
+ Config('overwrite', 'true', r'''
+ configures whether the cursor's insert, update and remove
+ methods check the existing state of the record. If \c overwrite
+ is \c false, WT_CURSOR::insert fails with ::WT_DUPLICATE_KEY
+ if the record exists, WT_CURSOR::update and WT_CURSOR::remove
+ fail with ::WT_NOTFOUND if the record does not exist''',
+ type='boolean'),
+ Config('raw', 'false', r'''
+ ignore the encodings for the key and value, manage data as if
+ the formats were \c "u". See @ref cursor_raw for details''',
+ type='boolean'),
+ Config('timeout', '1200', r'''
+ maximum amount of time to allow for compact in seconds. The
+ actual amount of time spent in compact may exceed the configured
+ value. A value of zero disables the timeout''',
+ type='int'),
]),
'connection.close' : Method([
- Config('leak_memory', 'false', r'''
- don't free memory during close''',
- type='boolean'),
+ Config('leak_memory', 'false', r'''
+ don't free memory during close''',
+ type='boolean'),
]),
'connection.reconfigure' : Method(connection_runtime_config),
'connection.load_extension' : Method([
- Config('config', '', r'''
- configuration string passed to the entry point of the
- extension as its WT_CONFIG_ARG argument'''),
- Config('entry', 'wiredtiger_extension_init', r'''
- the entry point of the extension, called to initialize the
- extension when it is loaded. The signature of the function
- must match ::wiredtiger_extension_init'''),
- Config('terminate', 'wiredtiger_extension_terminate', r'''
- an optional function in the extension that is called before
- the extension is unloaded during WT_CONNECTION::close. The
- signature of the function must match
- ::wiredtiger_extension_terminate'''),
+ Config('config', '', r'''
+ configuration string passed to the entry point of the
+ extension as its WT_CONFIG_ARG argument'''),
+ Config('entry', 'wiredtiger_extension_init', r'''
+ the entry point of the extension, called to initialize the
+ extension when it is loaded. The signature of the function
+ must match ::wiredtiger_extension_init'''),
+ Config('terminate', 'wiredtiger_extension_terminate', r'''
+ an optional function in the extension that is called before
+ the extension is unloaded during WT_CONNECTION::close. The
+ signature of the function must match
+ ::wiredtiger_extension_terminate'''),
]),
'connection.open_session' : Method(session_config),
@@ -810,66 +821,66 @@ methods = {
# There are 4 variants of the wiredtiger_open configurations.
# wiredtiger_open:
-# Configuration values allowed in the application's configuration
-# argument to the wiredtiger_open call.
+# Configuration values allowed in the application's configuration
+# argument to the wiredtiger_open call.
# wiredtiger_open_basecfg:
-# Configuration values allowed in the WiredTiger.basecfg file (remove
+# Configuration values allowed in the WiredTiger.basecfg file (remove
# creation-specific configuration strings and add a version string).
# wiredtiger_open_usercfg:
-# Configuration values allowed in the WiredTiger.config file (remove
+# Configuration values allowed in the WiredTiger.config file (remove
# creation-specific configuration strings).
# wiredtiger_open_all:
-# All of the above configuration values combined
+# All of the above configuration values combined
'wiredtiger_open' : Method(
- connection_runtime_config +
- common_wiredtiger_open + [
- Config('config_base', 'true', r'''
- write the base configuration file if creating the database,
- see @ref config_base for more information''',
- type='boolean'),
- Config('create', 'false', r'''
- create the database if it does not exist''',
- type='boolean'),
- Config('exclusive', 'false', r'''
- fail if the database already exists, generally used with the
- \c create option''',
- type='boolean'),
- Config('use_environment_priv', 'false', r'''
- use the \c WIREDTIGER_CONFIG and \c WIREDTIGER_HOME environment
- variables regardless of whether or not the process is running
- with special privileges. See @ref home for more information''',
- type='boolean'),
+ connection_runtime_config +
+ common_wiredtiger_open + [
+ Config('config_base', 'true', r'''
+ write the base configuration file if creating the database,
+ see @ref config_base for more information''',
+ type='boolean'),
+ Config('create', 'false', r'''
+ create the database if it does not exist''',
+ type='boolean'),
+ Config('exclusive', 'false', r'''
+ fail if the database already exists, generally used with the
+ \c create option''',
+ type='boolean'),
+ Config('use_environment_priv', 'false', r'''
+ use the \c WIREDTIGER_CONFIG and \c WIREDTIGER_HOME environment
+ variables regardless of whether or not the process is running
+ with special privileges. See @ref home for more information''',
+ type='boolean'),
]),
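
[Editor's sketch, not part of the imported diff: creation-time options such as create, exclusive and config_base are passed to wiredtiger_open itself, which is why the basecfg/usercfg variants below omit them. Paths are illustrative.]

import wiredtiger
conn = wiredtiger.wiredtiger_open(
    'WT_HOME', 'create,exclusive=true,config_base=false')
conn.close()
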
'wiredtiger_open_basecfg' : Method(
- connection_runtime_config +
- common_wiredtiger_open + [
- Config('version', '(major=0,minor=0)', r'''
- the file version'''),
+ connection_runtime_config +
+ common_wiredtiger_open + [
+ Config('version', '(major=0,minor=0)', r'''
+ the file version'''),
]),
'wiredtiger_open_usercfg' : Method(
- connection_runtime_config +
- common_wiredtiger_open
+ connection_runtime_config +
+ common_wiredtiger_open
),
'wiredtiger_open_all' : Method(
- connection_runtime_config +
- common_wiredtiger_open + [
- Config('config_base', 'true', r'''
- write the base configuration file if creating the database,
- see @ref config_base for more information''',
- type='boolean'),
- Config('create', 'false', r'''
- create the database if it does not exist''',
- type='boolean'),
- Config('exclusive', 'false', r'''
- fail if the database already exists, generally used with the
- \c create option''',
- type='boolean'),
- Config('use_environment_priv', 'false', r'''
- use the \c WIREDTIGER_CONFIG and \c WIREDTIGER_HOME environment
- variables regardless of whether or not the process is running
- with special privileges. See @ref home for more information''',
- type='boolean'),
- Config('version', '(major=0,minor=0)', r'''
- the file version'''),
+ connection_runtime_config +
+ common_wiredtiger_open + [
+ Config('config_base', 'true', r'''
+ write the base configuration file if creating the database,
+ see @ref config_base for more information''',
+ type='boolean'),
+ Config('create', 'false', r'''
+ create the database if it does not exist''',
+ type='boolean'),
+ Config('exclusive', 'false', r'''
+ fail if the database already exists, generally used with the
+ \c create option''',
+ type='boolean'),
+ Config('use_environment_priv', 'false', r'''
+ use the \c WIREDTIGER_CONFIG and \c WIREDTIGER_HOME environment
+ variables regardless of whether or not the process is running
+ with special privileges. See @ref home for more information''',
+ type='boolean'),
+ Config('version', '(major=0,minor=0)', r'''
+ the file version'''),
]),
}
diff --git a/src/third_party/wiredtiger/dist/api_err.py b/src/third_party/wiredtiger/dist/api_err.py
index 352bfd5ca94..0c61a41ff28 100644
--- a/src/third_party/wiredtiger/dist/api_err.py
+++ b/src/third_party/wiredtiger/dist/api_err.py
@@ -2,42 +2,77 @@
# message code in strerror.c.
import re, textwrap
-
-import api_data
from dist import compare_srcfile
+class Error:
+ def __init__(self, name, value, desc, long_desc=None, **flags):
+ self.name = name
+ self.value = value
+ self.desc = desc
+ self.long_desc = long_desc
+ self.flags = flags
+
+# We don't want our error returns to conflict with any other package,
+# so use an uncommon range, specifically, -31,800 to -31,999.
+#
+# These numbers cannot change without breaking backward compatibility,
+# and are listed in error value order.
+errors = [
+ Error('WT_ROLLBACK', -31800,
+ 'conflict between concurrent operations', '''
+ This error is generated when an operation cannot be completed
+ due to a conflict with concurrent operations. The operation
+ may be retried; if a transaction is in progress, it should be
+ rolled back and the operation retried in a new transaction.'''),
+ Error('WT_DUPLICATE_KEY', -31801,
+ 'attempt to insert an existing key', '''
+ This error is generated when the application attempts to insert
+ a record with the same key as an existing record without the
+ 'overwrite' configuration to WT_SESSION::open_cursor.'''),
+ Error('WT_ERROR', -31802,
+ 'non-specific WiredTiger error', '''
+ This error is returned when an error is not covered by a
+ specific error return.'''),
+ Error('WT_NOTFOUND', -31803,
+ 'item not found', '''
+ This error indicates an operation did not find a value to
+ return. This includes cursor search and other operations
+ where no record matched the cursor's search key such as
+ WT_CURSOR::update or WT_CURSOR::remove.'''),
+ Error('WT_PANIC', -31804,
+ 'WiredTiger library panic', '''
+ This error indicates an underlying problem that requires the
+ application exit and restart.'''),
+ Error('WT_RESTART', -31805,
+ 'restart the operation (internal)', undoc=True),
+]
+
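
[Editor's sketch, not part of the imported diff: what the loop below emits into wiredtiger.in for the table above. Because each Error now carries its value, renumbering cannot happen by accident.]

for err in errors:
    print('#define\t%s\t%d' % (err.name, err.value))
# -> #define  WT_ROLLBACK        -31800
# -> #define  WT_DUPLICATE_KEY   -31801
# ... and so on, in declaration order.
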
# Update the #defines in the wiredtiger.in file.
tmp_file = '__tmp'
tfile = open(tmp_file, 'w')
skip = 0
for line in open('../src/include/wiredtiger.in', 'r'):
- if not skip:
- tfile.write(line)
- if line.count('Error return section: END'):
- tfile.write(line)
- skip = 0
- elif line.count('Error return section: BEGIN'):
- tfile.write(' */\n')
- skip = 1
-
- # We don't want our error returns to conflict with any other
- # package, so use an uncommon range, specifically, -31,800 to
- # -31,999.
- v = -31800
- for err in api_data.errors:
- if 'undoc' in err.flags:
- tfile.write('/*! @cond internal */\n')
- tfile.write('/*!%s.%s */\n' %
- (('\n * ' if err.long_desc else ' ') +
- err.desc[0].upper() + err.desc[1:],
- ''.join('\n * ' + l for l in textwrap.wrap(
- textwrap.dedent(err.long_desc).strip(), 77)) +
- '\n' if err.long_desc else ''))
- tfile.write('#define\t%s\t%d\n' % (err.name, v))
- v -= 1
- if 'undoc' in err.flags:
- tfile.write('/*! @endcond */\n')
- tfile.write('/*\n')
+ if not skip:
+ tfile.write(line)
+ if line.count('Error return section: END'):
+ tfile.write(line)
+ skip = 0
+ elif line.count('Error return section: BEGIN'):
+ tfile.write(' */\n')
+ skip = 1
+ for err in errors:
+ if 'undoc' in err.flags:
+ tfile.write('/*! @cond internal */\n')
+ tfile.write('/*!%s.%s */\n' %
+ (('\n * ' if err.long_desc else ' ') +
+ err.desc[0].upper() + err.desc[1:],
+ ''.join('\n * ' + l for l in textwrap.wrap(
+ textwrap.dedent(err.long_desc).strip(), 77)) +
+ '\n' if err.long_desc else ''))
+ tfile.write('#define\t%s\t%d\n' % (err.name, err.value))
+ if 'undoc' in err.flags:
+ tfile.write('/*! @endcond */\n')
+ tfile.write('/*\n')
tfile.close()
compare_srcfile(tmp_file, '../src/include/wiredtiger.in')
@@ -50,37 +85,37 @@ tfile.write('''/* DO NOT EDIT: automatically built by dist/api_err.py. */
/*
* wiredtiger_strerror --
- * Return a string for any error value.
+ *\tReturn a string for any error value.
*/
const char *
wiredtiger_strerror(int error)
{
- static char errbuf[64];
- char *p;
+\tstatic char errbuf[64];
+\tchar *p;
- if (error == 0)
- return ("Successful return: 0");
+\tif (error == 0)
+\t\treturn ("Successful return: 0");
- switch (error) {
+\tswitch (error) {
''')
-for err in api_data.errors:
- tfile.write('\tcase ' + err.name + ':\n')
- tfile.write('\t\treturn ("' + err.name + ': ' + err.desc + '");\n')
+for err in errors:
+ tfile.write('\tcase ' + err.name + ':\n')
+ tfile.write('\t\treturn ("' + err.name + ': ' + err.desc + '");\n')
tfile.write('''\
- default:
- if (error > 0 && (p = strerror(error)) != NULL)
- return (p);
- break;
- }
+\tdefault:
+\t\tif (error > 0 && (p = strerror(error)) != NULL)
+\t\t\treturn (p);
+\t\tbreak;
+\t}
- /*
- * !!!
- * Not thread-safe, but this is never supposed to happen.
- */
- (void)snprintf(errbuf, sizeof(errbuf), "Unknown error: %d", error);
- return (errbuf);
+\t/*
+\t * !!!
+\t * Not thread-safe, but this is never supposed to happen.
+\t */
+\t(void)snprintf(errbuf, sizeof(errbuf), "Unknown error: %d", error);
+\treturn (errbuf);
}
''')
tfile.close()
@@ -92,20 +127,20 @@ tmp_file = '__tmp'
tfile = open(tmp_file, 'w')
skip = 0
for line in open(doc, 'r'):
- if not skip:
- tfile.write(line)
- if line.count('IGNORE_BUILT_BY_API_ERR_END'):
- tfile.write(line)
- skip = 0
- elif line.count('IGNORE_BUILT_BY_API_ERR_BEGIN'):
- tfile.write('@endif\n\n')
- skip = 1
+ if not skip:
+ tfile.write(line)
+ if line.count('IGNORE_BUILT_BY_API_ERR_END'):
+ tfile.write(line)
+ skip = 0
+ elif line.count('IGNORE_BUILT_BY_API_ERR_BEGIN'):
+ tfile.write('@endif\n\n')
+ skip = 1
- for err in api_data.errors:
- if 'undoc' in err.flags:
- continue
- tfile.write(
- '@par <code>' + err.name.upper() + '</code>\n' +
- " ".join(err.long_desc.split()) + '\n\n')
+ for err in errors:
+ if 'undoc' in err.flags:
+ continue
+ tfile.write(
+ '@par <code>' + err.name.upper() + '</code>\n' +
+ " ".join(err.long_desc.split()) + '\n\n')
tfile.close()
compare_srcfile(tmp_file, doc)
diff --git a/src/third_party/wiredtiger/dist/db.py b/src/third_party/wiredtiger/dist/db.py
index 06a9484d1f9..938d36d8d62 100644
--- a/src/third_party/wiredtiger/dist/db.py
+++ b/src/third_party/wiredtiger/dist/db.py
@@ -3,22 +3,22 @@
import getopt, random, sys
-dmin = 7 # Minimum data size
-dmax = 837 # Maximum data size
+dmin = 7 # Minimum data size
+dmax = 837 # Maximum data size
-seed = None # Random number seed
-pairs = 100000 # Key/data pairs to output
+seed = None # Random number seed
+pairs = 100000 # Key/data pairs to output
opts, args = getopt.getopt(sys.argv[1:], "m:n:s:")
for o, a in opts:
- if o == "-m":
- dmax = int(a)
- elif o == "-n":
- pairs = int(a)
- elif o == "-s":
- seed = int(a)
+ if o == "-m":
+ dmax = int(a)
+ elif o == "-n":
+ pairs = int(a)
+ elif o == "-s":
+ seed = int(a)
random.seed(seed)
for i in range(pairs):
- fmt = "%010d\ndata: %0" + str(random.randrange(dmin, dmax)) + "d"
- print(fmt % (i, i))
+ fmt = "%010d\ndata: %0" + str(random.randrange(dmin, dmax)) + "d"
+ print(fmt % (i, i))
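
[Editor's sketch, not part of the imported diff: the record format this script prints, with a fixed illustrative data width of 12 in place of the random width chosen above.]

fmt = "%010d\ndata: %012d"
print(fmt % (3, 3))
# -> 0000000003
# -> data: 000000000003
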
diff --git a/src/third_party/wiredtiger/dist/dist.py b/src/third_party/wiredtiger/dist/dist.py
index 6994a9128af..2ea088ba3f1 100644
--- a/src/third_party/wiredtiger/dist/dist.py
+++ b/src/third_party/wiredtiger/dist/dist.py
@@ -1,35 +1,35 @@
import filecmp, glob, os, re, shutil
# source_files --
-# Return a list of the WiredTiger source file names.
+# Return a list of the WiredTiger source file names.
def source_files(skip_includes=False):
- if not skip_includes:
- for line in glob.iglob('../src/include/*.[hi]'):
- yield line
- file_re = re.compile(r'^\w')
- for line in open('filelist', 'r'):
- if file_re.match(line):
- yield os.path.join('..', line.rstrip())
- for line in open('extlist', 'r'):
- if file_re.match(line):
- yield os.path.join('..', line.rstrip())
+ if not skip_includes:
+ for line in glob.iglob('../src/include/*.[hi]'):
+ yield line
+ file_re = re.compile(r'^\w')
+ for line in open('filelist', 'r'):
+ if file_re.match(line):
+ yield os.path.join('..', line.rstrip())
+ for line in open('extlist', 'r'):
+ if file_re.match(line):
+ yield os.path.join('..', line.rstrip())
# source_dirs --
-# Return a list of the WiredTiger source directory names.
+# Return a list of the WiredTiger source directory names.
def source_dirs():
- dirs = set()
- for f in source_files():
- dirs.add(os.path.dirname(f))
- return dirs
+ dirs = set()
+ for f in source_files():
+ dirs.add(os.path.dirname(f))
+ return dirs
def print_source_dirs():
- for d in source_dirs():
- print d
+ for d in source_dirs():
+ print d
# compare_srcfile --
-# Compare two files, and if they differ, update the source file.
+# Compare two files, and if they differ, update the source file.
def compare_srcfile(tmp, src):
- if not os.path.isfile(src) or not filecmp.cmp(tmp, src, shallow=False):
- print('Updating ' + src)
- shutil.copyfile(tmp, src)
- os.remove(tmp)
+ if not os.path.isfile(src) or not filecmp.cmp(tmp, src, shallow=False):
+ print('Updating ' + src)
+ shutil.copyfile(tmp, src)
+ os.remove(tmp)
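
[Editor's sketch, not part of the imported diff: the pattern every dist script follows with compare_srcfile; build output into a temporary file, then let the helper copy it over the target only when the content differs. The target path is illustrative.]

from dist import compare_srcfile

tmp = '__tmp'
out = open(tmp, 'w')
out.write('generated content\n')
out.close()
compare_srcfile(tmp, '../src/include/example.h')
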
diff --git a/src/third_party/wiredtiger/dist/filelist b/src/third_party/wiredtiger/dist/filelist
index 6fa967d1504..8929e0076ba 100644
--- a/src/third_party/wiredtiger/dist/filelist
+++ b/src/third_party/wiredtiger/dist/filelist
@@ -24,7 +24,6 @@ src/btree/bt_cursor.c
src/btree/bt_debug.c
src/btree/bt_delete.c
src/btree/bt_discard.c
-src/btree/bt_evict.c
src/btree/bt_handle.c
src/btree/bt_huffman.c
src/btree/bt_io.c
@@ -34,6 +33,7 @@ src/btree/bt_page.c
src/btree/bt_read.c
src/btree/bt_ret.c
src/btree/bt_slvg.c
+src/btree/bt_split.c
src/btree/bt_stat.c
src/btree/bt_sync.c
src/btree/bt_upgrade.c
@@ -42,10 +42,6 @@ src/btree/bt_vrfy_dsk.c
src/btree/bt_walk.c
src/btree/col_modify.c
src/btree/col_srch.c
-src/btree/rec_evict.c
-src/btree/rec_split.c
-src/btree/rec_track.c
-src/btree/rec_write.c
src/btree/row_key.c
src/btree/row_modify.c
src/btree/row_srch.c
@@ -82,6 +78,9 @@ src/cursor/cur_metadata.c
src/cursor/cur_stat.c
src/cursor/cur_std.c
src/cursor/cur_table.c
+src/evict/evict_file.c
+src/evict/evict_lru.c
+src/evict/evict_page.c
src/log/log.c
src/log/log_auto.c
src/log/log_slot.c
@@ -130,6 +129,8 @@ src/os_posix/os_yield.c
src/packing/pack_api.c
src/packing/pack_impl.c
src/packing/pack_stream.c
+src/reconcile/rec_track.c
+src/reconcile/rec_write.c
src/schema/schema_create.c
src/schema/schema_drop.c
src/schema/schema_list.c
diff --git a/src/third_party/wiredtiger/dist/filelist.win b/src/third_party/wiredtiger/dist/filelist.win
index 9a0f3a27440..53e18cbff1a 100644
--- a/src/third_party/wiredtiger/dist/filelist.win
+++ b/src/third_party/wiredtiger/dist/filelist.win
@@ -24,7 +24,6 @@ src/btree/bt_cursor.c
src/btree/bt_debug.c
src/btree/bt_delete.c
src/btree/bt_discard.c
-src/btree/bt_evict.c
src/btree/bt_handle.c
src/btree/bt_huffman.c
src/btree/bt_io.c
@@ -33,6 +32,7 @@ src/btree/bt_ovfl.c
src/btree/bt_page.c
src/btree/bt_read.c
src/btree/bt_ret.c
+src/btree/bt_split.c
src/btree/bt_slvg.c
src/btree/bt_stat.c
src/btree/bt_sync.c
@@ -42,10 +42,6 @@ src/btree/bt_vrfy_dsk.c
src/btree/bt_walk.c
src/btree/col_modify.c
src/btree/col_srch.c
-src/btree/rec_evict.c
-src/btree/rec_split.c
-src/btree/rec_track.c
-src/btree/rec_write.c
src/btree/row_key.c
src/btree/row_modify.c
src/btree/row_srch.c
@@ -82,6 +78,9 @@ src/cursor/cur_metadata.c
src/cursor/cur_stat.c
src/cursor/cur_std.c
src/cursor/cur_table.c
+src/evict/evict_file.c
+src/evict/evict_lru.c
+src/evict/evict_page.c
src/log/log.c
src/log/log_auto.c
src/log/log_slot.c
@@ -131,6 +130,8 @@ src/os_win/os_yield.c
src/packing/pack_api.c
src/packing/pack_impl.c
src/packing/pack_stream.c
+src/reconcile/rec_track.c
+src/reconcile/rec_write.c
src/schema/schema_create.c
src/schema/schema_drop.c
src/schema/schema_list.c
diff --git a/src/third_party/wiredtiger/dist/flags.py b/src/third_party/wiredtiger/dist/flags.py
index 1f8a376fcfc..2d68d932c02 100644
--- a/src/third_party/wiredtiger/dist/flags.py
+++ b/src/third_party/wiredtiger/dist/flags.py
@@ -8,133 +8,133 @@ flags = {
###################################################
# Internal routine flag declarations
###################################################
- 'cache_flush' : [
- 'SYNC_CHECKPOINT',
- 'SYNC_CLOSE',
- 'SYNC_DISCARD',
- 'SYNC_DISCARD_FORCE',
- 'SYNC_WRITE_LEAVES',
- ],
- 'file_types' : [
- 'FILE_TYPE_CHECKPOINT',
- 'FILE_TYPE_DATA',
- 'FILE_TYPE_DIRECTORY',
- 'FILE_TYPE_LOG',
- 'FILE_TYPE_TURTLE',
- ],
- 'log_scan' : [
- 'LOGSCAN_FIRST',
- 'LOGSCAN_FROM_CKP',
- 'LOGSCAN_ONE',
- 'LOGSCAN_RECOVER',
- ],
- 'log_write' : [
- 'LOG_DSYNC',
- 'LOG_FLUSH',
- 'LOG_FSYNC',
- ],
- 'page_read' : [
- 'READ_CACHE',
- 'READ_COMPACT',
- 'READ_NO_GEN',
- 'READ_NO_EVICT',
- 'READ_NO_WAIT',
- 'READ_PREV',
- 'READ_SKIP_INTL',
- 'READ_SKIP_LEAF',
- 'READ_TRUNCATE',
- 'READ_WONT_NEED',
- ],
- 'rec_write' : [
- 'EVICTING',
- 'SKIP_UPDATE_ERR',
- 'SKIP_UPDATE_RESTORE',
- ],
- 'txn_log_checkpoint' : [
- 'TXN_LOG_CKPT_FAIL',
- 'TXN_LOG_CKPT_PREPARE',
- 'TXN_LOG_CKPT_START',
- 'TXN_LOG_CKPT_STOP',
- ],
- 'verbose' : [
- 'VERB_API',
- 'VERB_BLOCK',
- 'VERB_CHECKPOINT',
- 'VERB_COMPACT',
- 'VERB_EVICT',
- 'VERB_EVICTSERVER',
- 'VERB_FILEOPS',
- 'VERB_LOG',
- 'VERB_LSM',
- 'VERB_METADATA',
- 'VERB_MUTEX',
- 'VERB_OVERFLOW',
- 'VERB_READ',
- 'VERB_RECONCILE',
- 'VERB_RECOVERY',
- 'VERB_SALVAGE',
- 'VERB_SHARED_CACHE',
- 'VERB_SPLIT',
- 'VERB_TEMPORARY',
- 'VERB_TRANSACTION',
- 'VERB_VERIFY',
- 'VERB_VERSION',
- 'VERB_WRITE',
- ],
+ 'cache_flush' : [
+ 'SYNC_CHECKPOINT',
+ 'SYNC_CLOSE',
+ 'SYNC_DISCARD',
+ 'SYNC_DISCARD_FORCE',
+ 'SYNC_WRITE_LEAVES',
+ ],
+ 'file_types' : [
+ 'FILE_TYPE_CHECKPOINT',
+ 'FILE_TYPE_DATA',
+ 'FILE_TYPE_DIRECTORY',
+ 'FILE_TYPE_LOG',
+ 'FILE_TYPE_TURTLE',
+ ],
+ 'log_scan' : [
+ 'LOGSCAN_FIRST',
+ 'LOGSCAN_FROM_CKP',
+ 'LOGSCAN_ONE',
+ 'LOGSCAN_RECOVER',
+ ],
+ 'log_write' : [
+ 'LOG_DSYNC',
+ 'LOG_FLUSH',
+ 'LOG_FSYNC',
+ ],
+ 'page_read' : [
+ 'READ_CACHE',
+ 'READ_COMPACT',
+ 'READ_NO_GEN',
+ 'READ_NO_EVICT',
+ 'READ_NO_WAIT',
+ 'READ_PREV',
+ 'READ_SKIP_INTL',
+ 'READ_SKIP_LEAF',
+ 'READ_TRUNCATE',
+ 'READ_WONT_NEED',
+ ],
+ 'rec_write' : [
+ 'EVICTING',
+ 'SKIP_UPDATE_ERR',
+ 'SKIP_UPDATE_RESTORE',
+ ],
+ 'txn_log_checkpoint' : [
+ 'TXN_LOG_CKPT_FAIL',
+ 'TXN_LOG_CKPT_PREPARE',
+ 'TXN_LOG_CKPT_START',
+ 'TXN_LOG_CKPT_STOP',
+ ],
+ 'verbose' : [
+ 'VERB_API',
+ 'VERB_BLOCK',
+ 'VERB_CHECKPOINT',
+ 'VERB_COMPACT',
+ 'VERB_EVICT',
+ 'VERB_EVICTSERVER',
+ 'VERB_FILEOPS',
+ 'VERB_LOG',
+ 'VERB_LSM',
+ 'VERB_METADATA',
+ 'VERB_MUTEX',
+ 'VERB_OVERFLOW',
+ 'VERB_READ',
+ 'VERB_RECONCILE',
+ 'VERB_RECOVERY',
+ 'VERB_SALVAGE',
+ 'VERB_SHARED_CACHE',
+ 'VERB_SPLIT',
+ 'VERB_TEMPORARY',
+ 'VERB_TRANSACTION',
+ 'VERB_VERIFY',
+ 'VERB_VERSION',
+ 'VERB_WRITE',
+ ],
###################################################
# Structure flag declarations
###################################################
- 'conn' : [
- 'CONN_CACHE_POOL',
- 'CONN_CKPT_SYNC',
- 'CONN_EVICTION_RUN',
- 'CONN_LEAK_MEMORY',
- 'CONN_LSM_MERGE',
- 'CONN_PANIC',
- 'CONN_SERVER_RUN',
- 'CONN_SERVER_ASYNC',
- 'CONN_SERVER_CHECKPOINT',
- 'CONN_SERVER_LSM',
- 'CONN_SERVER_STATISTICS',
- 'CONN_SERVER_SWEEP',
- 'CONN_WAS_BACKUP',
- ],
- 'session' : [
- 'SESSION_CAN_WAIT',
- 'SESSION_CLEAR_EVICT_WALK',
- 'SESSION_DISCARD_FORCE',
- 'SESSION_HANDLE_LIST_LOCKED',
- 'SESSION_INTERNAL',
- 'SESSION_LOGGING_INMEM',
- 'SESSION_NO_CACHE',
- 'SESSION_NO_CACHE_CHECK',
- 'SESSION_NO_DATA_HANDLES',
- 'SESSION_NO_LOGGING',
- 'SESSION_NO_SCHEMA_LOCK',
- 'SESSION_SALVAGE_CORRUPT_OK',
- 'SESSION_SCHEMA_LOCKED',
- 'SESSION_SERVER_ASYNC',
- 'SESSION_TABLE_LOCKED',
- ],
+ 'conn' : [
+ 'CONN_CACHE_POOL',
+ 'CONN_CKPT_SYNC',
+ 'CONN_EVICTION_RUN',
+ 'CONN_LEAK_MEMORY',
+ 'CONN_LSM_MERGE',
+ 'CONN_PANIC',
+ 'CONN_SERVER_RUN',
+ 'CONN_SERVER_ASYNC',
+ 'CONN_SERVER_CHECKPOINT',
+ 'CONN_SERVER_LSM',
+ 'CONN_SERVER_STATISTICS',
+ 'CONN_SERVER_SWEEP',
+ 'CONN_WAS_BACKUP',
+ ],
+ 'session' : [
+ 'SESSION_CAN_WAIT',
+ 'SESSION_CLEAR_EVICT_WALK',
+ 'SESSION_DISCARD_FORCE',
+ 'SESSION_HANDLE_LIST_LOCKED',
+ 'SESSION_INTERNAL',
+ 'SESSION_LOGGING_INMEM',
+ 'SESSION_NO_CACHE',
+ 'SESSION_NO_CACHE_CHECK',
+ 'SESSION_NO_DATA_HANDLES',
+ 'SESSION_NO_LOGGING',
+ 'SESSION_NO_SCHEMA_LOCK',
+ 'SESSION_SALVAGE_CORRUPT_OK',
+ 'SESSION_SCHEMA_LOCKED',
+ 'SESSION_SERVER_ASYNC',
+ 'SESSION_TABLE_LOCKED',
+ ],
}
-flag_cnt = {} # Dictionary [flag] : [reference count]
-flag_name = {} # Dictionary [flag] : [name ...]
-name_mask = {} # Dictionary [name] : [used flag mask]
+flag_cnt = {} # Dictionary [flag] : [reference count]
+flag_name = {} # Dictionary [flag] : [name ...]
+name_mask = {} # Dictionary [name] : [used flag mask]
# Step through the flags dictionary and build our local dictionaries.
for method in flags.items():
- name_mask[method[0]] = 0x0
- for flag in method[1]:
- if flag == '__NONE__':
- continue
- if flag not in flag_cnt:
- flag_cnt[flag] = 1
- flag_name[flag] = []
- else:
- flag_cnt[flag] += 1
- flag_name[flag].append(method[0])
+ name_mask[method[0]] = 0x0
+ for flag in method[1]:
+ if flag == '__NONE__':
+ continue
+ if flag not in flag_cnt:
+ flag_cnt[flag] = 1
+ flag_name[flag] = []
+ else:
+ flag_cnt[flag] += 1
+ flag_name[flag].append(method[0])
# Create list of possible bit masks.
bits = [2 ** i for i in range(0, 32)]
@@ -142,46 +142,46 @@ bits = [2 ** i for i in range(0, 32)]
# Walk the list of flags in reverse, sorted-by-reference count order. For
# each flag, find a bit that's not currently in use by any method using the
# flag.
-flag_bit = {} # Dictionary [flag] : [bit value]
+flag_bit = {} # Dictionary [flag] : [bit value]
for f in sorted(flag_cnt.items(), key = lambda k_v : (-k_v[1], k_v[0])):
- mask = 0xffffffff
- for m in flag_name[f[0]]:
- mask &= ~name_mask[m]
- if mask == 0:
- print >>sys.stderr,\
- "flags.py: ran out of flags at " + m + " method",
- sys.exit(1)
- for b in bits:
- if mask & b:
- mask = b
- break
- flag_bit[f[0]] = mask
- for m in flag_name[f[0]]:
- name_mask[m] |= mask
+ mask = 0xffffffff
+ for m in flag_name[f[0]]:
+ mask &= ~name_mask[m]
+ if mask == 0:
+ print >>sys.stderr,\
+ "flags.py: ran out of flags at " + m + " method",
+ sys.exit(1)
+ for b in bits:
+ if mask & b:
+ mask = b
+ break
+ flag_bit[f[0]] = mask
+ for m in flag_name[f[0]]:
+ name_mask[m] |= mask
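
[Editor's sketch, not part of the imported diff: the allocation above in miniature. Flags with the highest reference count are placed first, and each flag takes the lowest bit not yet used by any method referencing it, so methods that never share a flag may reuse the same bit.]

demo = {'m1': ['A', 'B'], 'm2': ['A', 'C']}
masks = dict((m, 0) for m in demo)
counts = {}
for fls in demo.values():
    for fl in fls:
        counts[fl] = counts.get(fl, 0) + 1
for fl, _ in sorted(counts.items(), key=lambda kv: (-kv[1], kv[0])):
    used = 0
    for m, fls in demo.items():
        if fl in fls:
            used |= masks[m]
    bit = 1
    while used & bit:
        bit <<= 1
    for m, fls in demo.items():
        if fl in fls:
            masks[m] |= bit
    print('%s %#x' % (fl, bit))
# -> A 0x1   (shared by m1 and m2, assigned first)
# -> B 0x2   (next bit free in m1)
# -> C 0x2   (reused: B and C never appear in the same method)
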
# Print out the flag masks in hex.
-# Assumes tab stops set to 8 characters.
+# Assumes tab stops set to 8 characters.
flag_info = ''
for f in sorted(flag_cnt.items()):
- flag_info += "#define\tWT_%s%s%#010x\n" %\
- (f[0],\
- "\t" * max(1, 6 - int((len('WT_') + len(f[0])) / 8)),\
- flag_bit[f[0]])
+ flag_info += "#define\tWT_%s%s%#010x\n" %\
+ (f[0],\
+ "\t" * max(1, 6 - int((len('WT_') + len(f[0])) / 8)),\
+ flag_bit[f[0]])
# Update the wiredtiger.in file with the flags information.
tmp_file = '__tmp'
tfile = open(tmp_file, 'w')
skip = 0
for line in open('../src/include/flags.h', 'r'):
- if skip:
- if line.count('flags section: END'):
- tfile.write('/*\n' + line)
- skip = 0
- else:
- tfile.write(line)
- if line.count('flags section: BEGIN'):
- skip = 1
- tfile.write(' */\n')
- tfile.write(flag_info)
+ if skip:
+ if line.count('flags section: END'):
+ tfile.write('/*\n' + line)
+ skip = 0
+ else:
+ tfile.write(line)
+ if line.count('flags section: BEGIN'):
+ skip = 1
+ tfile.write(' */\n')
+ tfile.write(flag_info)
tfile.close()
compare_srcfile(tmp_file, '../src/include/flags.h')
diff --git a/src/third_party/wiredtiger/dist/java_doc.py b/src/third_party/wiredtiger/dist/java_doc.py
index d44ccb12160..71dfd93d3a8 100644
--- a/src/third_party/wiredtiger/dist/java_doc.py
+++ b/src/third_party/wiredtiger/dist/java_doc.py
@@ -27,18 +27,17 @@ cfunc_re = re.compile('\t.*? __F\(([a-z_]*)\)')
curr_class = ""
for line in open(f, 'r'):
- m = cclass_re.match(line)
- if m:
- curr_class = m.group(1)
+ m = cclass_re.match(line)
+ if m:
+ curr_class = m.group(1)
- if curr_class == "":
- continue
+ if curr_class == "":
+ continue
- m = cfunc_re.match(line)
- if m:
- tfile.write('COPYDOC(__' + curr_class.lower() + ', ' +
- curr_class.upper() + ', ' + m.group(1) + ')\n')
+ m = cfunc_re.match(line)
+ if m:
+ tfile.write('COPYDOC(__' + curr_class.lower() + ', ' +
+ curr_class.upper() + ', ' + m.group(1) + ')\n')
tfile.close()
compare_srcfile(tmp_file, o)
-
diff --git a/src/third_party/wiredtiger/dist/log.py b/src/third_party/wiredtiger/dist/log.py
index 2f8fbea5294..e9c0fd7850c 100644
--- a/src/third_party/wiredtiger/dist/log.py
+++ b/src/third_party/wiredtiger/dist/log.py
@@ -8,76 +8,93 @@ import log_data
tmp_file = '__tmp'
# Map log record types to:
-# (C type, pack type, printf format, printf arg(s))
+# (C type, pack type, printf format, printf arg(s), printf setup)
field_types = {
- 'string' : ('const char *', 'S', '%s', 'arg'),
- 'item' : ('WT_ITEM *', 'u', '%.*s',
- '(int)arg.size, (const char *)arg.data'),
- 'recno' : ('uint64_t', 'r', '%" PRIu64 "', 'arg'),
- 'uint32' : ('uint32_t', 'I', '%" PRIu32 "', 'arg'),
- 'uint64' : ('uint64_t', 'Q', '%" PRIu64 "', 'arg'),
+ 'string' : ('const char *', 'S', '%s', 'arg', ''),
+ 'item' : ('WT_ITEM *', 'u', '%s', 'escaped',
+ 'WT_RET(__logrec_jsonify_str(session, &escaped, &arg));'),
+ 'recno' : ('uint64_t', 'r', '%" PRIu64 "', 'arg', ''),
+ 'uint32' : ('uint32_t', 'I', '%" PRIu32 "', 'arg', ''),
+ 'uint64' : ('uint64_t', 'Q', '%" PRIu64 "', 'arg', ''),
}
def cintype(f):
- return field_types[f[0]][0]
+ return field_types[f[0]][0]
def couttype(f):
- type = cintype(f)
- # We already have a pointer to a WT_ITEM
- if f[0] == 'item':
- return type
- if type[-1] != '*':
- type += ' '
- return type + '*'
+ type = cintype(f)
+ # We already have a pointer to a WT_ITEM
+ if f[0] == 'item':
+ return type
+ if type[-1] != '*':
+ type += ' '
+ return type + '*'
def clocaltype(f):
- type = cintype(f)
- # Allocate a WT_ITEM struct on the stack
- if f[0] == 'item':
- return type[:-2]
- return type
+ type = cintype(f)
+ # Allocate a WT_ITEM struct on the stack
+ if f[0] == 'item':
+ return type[:-2]
+ return type
+
+def escape_decl(fields):
+ for f in fields:
+ if 'escaped' in field_types[f[0]][4]:
+ return '\n\tchar *escaped;'
+ return ''
+
+def has_escape(fields):
+ for f in fields:
+ if 'escaped' in field_types[f[0]][4]:
+ return True
+ return False
def pack_fmt(fields):
- return ''.join(field_types[f[0]][1] for f in fields)
+ return ''.join(field_types[f[0]][1] for f in fields)
def op_pack_fmt(r):
- return 'II' + pack_fmt(r.fields)
+ return 'II' + pack_fmt(r.fields)
def rec_pack_fmt(r):
- return 'I' + pack_fmt(r.fields)
+ return 'I' + pack_fmt(r.fields)
def printf_fmt(f):
- return field_types[f[0]][2]
+ return field_types[f[0]][2]
def printf_arg(f):
- arg = field_types[f[0]][3].replace('arg', f[1])
- return '\n\t ' + arg if f[0] == 'item' else ' ' + arg
+ arg = field_types[f[0]][3].replace('arg', f[1])
+ return ' ' + arg
+
+def printf_setup(f):
+ stmt = field_types[f[0]][4].replace('arg', f[1])
+ return '' if stmt == '' else stmt + '\n\t'
+
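
[Editor's sketch, not part of the imported diff: what the helpers above produce for an 'item' field named 'value'. Only items need a setup statement, because they are printed through an escaped copy.]

f = ('item', 'value')
cintype(f)        # -> 'WT_ITEM *'  (pack/unpack argument type)
clocaltype(f)     # -> 'WT_ITEM'    (stack local in the print function)
printf_fmt(f)     # -> '%s'
printf_arg(f)     # -> ' escaped'
printf_setup(f)   # -> 'WT_RET(__logrec_jsonify_str(session, &escaped, &value));'
                  #    followed by a newline and tab
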
#####################################################################
# Update log.h with #defines for types
#####################################################################
log_defines = (
- ''.join('/*! %s */\n#define\t%s\t%d\n' % (r.desc, r.macro_name(), i)
- for i, r in enumerate(log_data.rectypes)) +
- ''.join('/*! %s */\n#define\t%s\t%d\n' % (r.desc, r.macro_name(), i)
- for i, r in enumerate(log_data.optypes,start=1))
+ ''.join('/*! %s */\n#define\t%s\t%d\n' % (r.desc, r.macro_name(), i)
+ for i, r in enumerate(log_data.rectypes)) +
+ ''.join('/*! %s */\n#define\t%s\t%d\n' % (r.desc, r.macro_name(), i)
+ for i, r in enumerate(log_data.optypes,start=1))
)
tfile = open(tmp_file, 'w')
skip = 0
for line in open('../src/include/wiredtiger.in', 'r'):
- if skip:
- if 'Log record declarations: END' in line:
- tfile.write('/*\n' + line)
- skip = 0
- else:
- tfile.write(line)
- if 'Log record declarations: BEGIN' in line:
- skip = 1
- tfile.write(' */\n')
- tfile.write('/*! invalid operation */\n')
- tfile.write('#define\tWT_LOGOP_INVALID\t0\n')
- tfile.write(log_defines)
+ if skip:
+ if 'Log record declarations: END' in line:
+ tfile.write('/*\n' + line)
+ skip = 0
+ else:
+ tfile.write(line)
+ if 'Log record declarations: BEGIN' in line:
+ skip = 1
+ tfile.write(' */\n')
+ tfile.write('/*! invalid operation */\n')
+ tfile.write('#define\tWT_LOGOP_INVALID\t0\n')
+ tfile.write(log_defines)
tfile.close()
compare_srcfile(tmp_file, '../src/include/wiredtiger.in')
@@ -95,33 +112,33 @@ tfile.write('''
int
__wt_logrec_alloc(WT_SESSION_IMPL *session, size_t size, WT_ITEM **logrecp)
{
- WT_ITEM *logrec;
+\tWT_ITEM *logrec;
- WT_RET(__wt_scr_alloc(session, WT_ALIGN(size + 1, LOG_ALIGN), &logrec));
- WT_CLEAR(*(WT_LOG_RECORD *)logrec->data);
- logrec->size = offsetof(WT_LOG_RECORD, record);
+\tWT_RET(__wt_scr_alloc(session, WT_ALIGN(size + 1, LOG_ALIGN), &logrec));
+\tWT_CLEAR(*(WT_LOG_RECORD *)logrec->data);
+\tlogrec->size = offsetof(WT_LOG_RECORD, record);
- *logrecp = logrec;
- return (0);
+\t*logrecp = logrec;
+\treturn (0);
}
void
__wt_logrec_free(WT_SESSION_IMPL *session, WT_ITEM **logrecp)
{
- WT_UNUSED(session);
- __wt_scr_free(logrecp);
+\tWT_UNUSED(session);
+\t__wt_scr_free(logrecp);
}
int
__wt_logrec_read(WT_SESSION_IMPL *session,
const uint8_t **pp, const uint8_t *end, uint32_t *rectypep)
{
- uint64_t rectype;
+\tuint64_t rectype;
- WT_UNUSED(session);
- WT_RET(__wt_vunpack_uint(pp, WT_PTRDIFF(end, *pp), &rectype));
- *rectypep = (uint32_t)rectype;
- return (0);
+\tWT_UNUSED(session);
+\tWT_RET(__wt_vunpack_uint(pp, WT_PTRDIFF(end, *pp), &rectype));
+\t*rectypep = (uint32_t)rectype;
+\treturn (0);
}
int
@@ -129,99 +146,137 @@ __wt_logop_read(WT_SESSION_IMPL *session,
const uint8_t **pp, const uint8_t *end,
uint32_t *optypep, uint32_t *opsizep)
{
- return (__wt_struct_unpack(
- session, *pp, WT_PTRDIFF(end, *pp), "II", optypep, opsizep));
+\treturn (__wt_struct_unpack(
+\t session, *pp, WT_PTRDIFF(end, *pp), "II", optypep, opsizep));
+}
+
+static size_t
+__logrec_json_unpack_str(char *dest, size_t destlen, const char *src,
+ size_t srclen)
+{
+\tsize_t total;
+\tsize_t n;
+
+\ttotal = 0;
+\twhile (srclen > 0) {
+\t\tn = __wt_json_unpack_char(*src++, (u_char *)dest, destlen, 0);
+\t\tsrclen--;
+\t\tif (n > destlen)
+\t\t\tdestlen = 0;
+\t\telse {
+\t\t\tdestlen -= n;
+\t\t\tdest += n;
+\t\t}
+\t\ttotal += n;
+\t}
+\tif (destlen > 0)
+\t\t*dest = '\\0';
+\treturn (total + 1);
+}
+
+static int
+__logrec_jsonify_str(WT_SESSION_IMPL *session, char **destp, WT_ITEM *item)
+{
+\tsize_t needed;
+
+\tneeded = __logrec_json_unpack_str(NULL, 0, item->data, item->size);
+\tWT_RET(__wt_realloc(session, NULL, needed, destp));
+\t(void)__logrec_json_unpack_str(*destp, needed, item->data, item->size);
+\treturn (0);
}
''')
# Emit code to read, write and print log operations (within a log record)
for optype in log_data.optypes:
- if not optype.fields:
- continue
+ if not optype.fields:
+ continue
- tfile.write('''
+ tfile.write('''
int
__wt_logop_%(name)s_pack(
WT_SESSION_IMPL *session, WT_ITEM *logrec,
%(arg_decls)s)
{
- const char *fmt = WT_UNCHECKED_STRING(%(fmt)s);
- size_t size;
- uint32_t optype, recsize;
-
- optype = %(macro)s;
- WT_RET(__wt_struct_size(session, &size, fmt,
- optype, 0%(arg_names)s));
-
- __wt_struct_size_adjust(session, &size);
- WT_RET(__wt_buf_extend(session, logrec, logrec->size + size));
- recsize = (uint32_t)size;
- WT_RET(__wt_struct_pack(session,
- (uint8_t *)logrec->data + logrec->size, size, fmt,
- optype, recsize%(arg_names)s));
-
- logrec->size += (uint32_t)size;
- return (0);
+\tconst char *fmt = WT_UNCHECKED_STRING(%(fmt)s);
+\tsize_t size;
+\tuint32_t optype, recsize;
+
+\toptype = %(macro)s;
+\tWT_RET(__wt_struct_size(session, &size, fmt,
+\t optype, 0%(arg_names)s));
+
+\t__wt_struct_size_adjust(session, &size);
+\tWT_RET(__wt_buf_extend(session, logrec, logrec->size + size));
+\trecsize = (uint32_t)size;
+\tWT_RET(__wt_struct_pack(session,
+\t (uint8_t *)logrec->data + logrec->size, size, fmt,
+\t optype, recsize%(arg_names)s));
+
+\tlogrec->size += (uint32_t)size;
+\treturn (0);
}
''' % {
- 'name' : optype.name,
- 'macro' : optype.macro_name(),
- 'arg_decls' : ', '.join(
- '%s%s%s' % (cintype(f), '' if cintype(f)[-1] == '*' else ' ', f[1])
- for f in optype.fields),
- 'arg_names' : ''.join(', %s' % f[1] for f in optype.fields),
- 'fmt' : op_pack_fmt(optype)
+ 'name' : optype.name,
+ 'macro' : optype.macro_name(),
+ 'arg_decls' : ', '.join(
+ '%s%s%s' % (cintype(f), '' if cintype(f)[-1] == '*' else ' ', f[1])
+ for f in optype.fields),
+ 'arg_names' : ''.join(', %s' % f[1] for f in optype.fields),
+ 'fmt' : op_pack_fmt(optype)
})
- tfile.write('''
+ tfile.write('''
int
__wt_logop_%(name)s_unpack(
WT_SESSION_IMPL *session, const uint8_t **pp, const uint8_t *end,
%(arg_decls)s)
{
- const char *fmt = WT_UNCHECKED_STRING(%(fmt)s);
- uint32_t optype, size;
+\tconst char *fmt = WT_UNCHECKED_STRING(%(fmt)s);
+\tuint32_t optype, size;
- WT_RET(__wt_struct_unpack(session, *pp, WT_PTRDIFF(end, *pp), fmt,
- &optype, &size%(arg_names)s));
- WT_ASSERT(session, optype == %(macro)s);
+\tWT_RET(__wt_struct_unpack(session, *pp, WT_PTRDIFF(end, *pp), fmt,
+\t &optype, &size%(arg_names)s));
+\tWT_ASSERT(session, optype == %(macro)s);
- *pp += size;
- return (0);
+\t*pp += size;
+\treturn (0);
}
''' % {
- 'name' : optype.name,
- 'macro' : optype.macro_name(),
- 'arg_decls' : ', '.join(
- '%s%sp' % (couttype(f), f[1]) for f in optype.fields),
- 'arg_names' : ''.join(', %sp' % f[1] for f in optype.fields),
- 'fmt' : op_pack_fmt(optype)
+ 'name' : optype.name,
+ 'macro' : optype.macro_name(),
+ 'arg_decls' : ', '.join(
+ '%s%sp' % (couttype(f), f[1]) for f in optype.fields),
+ 'arg_names' : ''.join(', %sp' % f[1] for f in optype.fields),
+ 'fmt' : op_pack_fmt(optype)
})
- tfile.write('''
+ tfile.write('''
int
__wt_logop_%(name)s_print(
WT_SESSION_IMPL *session, const uint8_t **pp, const uint8_t *end, FILE *out)
{
- %(arg_decls)s
+\t%(arg_decls)s
- WT_RET(__wt_logop_%(name)s_unpack(
- session, pp, end%(arg_addrs)s));
+\t%(arg_init)sWT_RET(__wt_logop_%(name)s_unpack(
+\t session, pp, end%(arg_addrs)s));
- fprintf(out, " \\"optype\\": \\"%(name)s\\",\\n");
- %(print_args)s
- return (0);
+\tfprintf(out, " \\"optype\\": \\"%(name)s\\",\\n");
+\t%(print_args)s
+\t%(arg_fini)sreturn (0);
}
''' % {
- 'name' : optype.name,
- 'arg_decls' : '\n\t'.join('%s%s%s;' %
- (clocaltype(f), '' if clocaltype(f)[-1] == '*' else ' ', f[1])
- for f in optype.fields),
- 'arg_addrs' : ''.join(', &%s' % f[1] for f in optype.fields),
- 'print_args' : '\n\t'.join(
- 'fprintf(out, " \\"%s\\": \\"%s\\",\\n",%s);' %
- (f[1], printf_fmt(f), printf_arg(f))
- for f in optype.fields),
+ 'name' : optype.name,
+ 'arg_decls' : ('\n\t'.join('%s%s%s;' %
+ (clocaltype(f), '' if clocaltype(f)[-1] == '*' else ' ', f[1])
+ for f in optype.fields)) + escape_decl(optype.fields),
+ 'arg_init' : ('escaped = NULL;\n\t' if has_escape(optype.fields) else ''),
+ 'arg_fini' : ('__wt_free(session, escaped);\n\t'
+ if has_escape(optype.fields) else ''),
+ 'arg_addrs' : ''.join(', &%s' % f[1] for f in optype.fields),
+ 'print_args' : '\n\t'.join(
+ '%sfprintf(out, " \\"%s\\": \\"%s\\",\\n",%s);' %
+ (printf_setup(f), f[1], printf_fmt(f), printf_arg(f))
+ for f in optype.fields),
})
# Emit the printlog entry point
@@ -230,32 +285,32 @@ int
__wt_txn_op_printlog(
WT_SESSION_IMPL *session, const uint8_t **pp, const uint8_t *end, FILE *out)
{
- uint32_t optype, opsize;
+\tuint32_t optype, opsize;
- /* Peek at the size and the type. */
- WT_RET(__wt_logop_read(session, pp, end, &optype, &opsize));
- end = *pp + opsize;
+\t/* Peek at the size and the type. */
+\tWT_RET(__wt_logop_read(session, pp, end, &optype, &opsize));
+\tend = *pp + opsize;
- switch (optype) {''')
+\tswitch (optype) {''')
for optype in log_data.optypes:
- if not optype.fields:
- continue
+ if not optype.fields:
+ continue
- tfile.write('''
- case %(macro)s:
- WT_RET(%(print_func)s(session, pp, end, out));
- break;
+ tfile.write('''
+\tcase %(macro)s:
+\t\tWT_RET(%(print_func)s(session, pp, end, out));
+\t\tbreak;
''' % {
- 'macro' : optype.macro_name(),
- 'print_func' : '__wt_logop_' + optype.name + '_print',
+ 'macro' : optype.macro_name(),
+ 'print_func' : '__wt_logop_' + optype.name + '_print',
})
tfile.write('''
- WT_ILLEGAL_VALUE(session);
- }
+\tWT_ILLEGAL_VALUE(session);
+\t}
- return (0);
+\treturn (0);
}
''')
diff --git a/src/third_party/wiredtiger/dist/s_string.ok b/src/third_party/wiredtiger/dist/s_string.ok
index da1fb6497ad..0bd9cfc6a8c 100644
--- a/src/third_party/wiredtiger/dist/s_string.ok
+++ b/src/third_party/wiredtiger/dist/s_string.ok
@@ -372,7 +372,9 @@ WiredTigerCheckpoint
WiredTigerHome
WiredTigerInit
WiredTigerLog
+WiredTigerPreplog
WiredTigerStat
+WiredTigerTmplog
WiredTigerTxn
WithSeeds
Wmissing
diff --git a/src/third_party/wiredtiger/dist/s_style b/src/third_party/wiredtiger/dist/s_style
index e36924dffb9..50d00ff1d7f 100755
--- a/src/third_party/wiredtiger/dist/s_style
+++ b/src/third_party/wiredtiger/dist/s_style
@@ -157,18 +157,22 @@ for f in \
done
# Check Python coding standards: check for tab characters.
-egrep ' ' tools/*.py test/suite/*.py |
- sed 's/:.*//' |
+egrep ' ' `find . -name '*.py'` |
+ sed -e 's/:.*//' \
+ -e '/__init__.py/d' \
+ -e '/src\/docs\/tools\/doxypy.py/d' |
sort -u |
sed 's/^/ /' > $t
test -s $t && {
- echo '[tab] characters appear in test suite scripts:'
+ echo '[tab] characters appear in Python scripts:'
cat $t
}
# Check Python coding standards: check for trailing semi-colons.
-egrep ';$' tools/*.py test/suite/*.py > $t
+# Don't check too widely: there are third-party tools that fail this test, as
+# well as scripts in this directory that output C code and so fail it.
+egrep ';$' `find lang test -name '*.py'`> $t
test -s $t && {
- echo 'trailing semi-colons in tools or test suite Python code:'
+ echo 'trailing semi-colons in selected Python code:'
cat $t
}
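
[Editor's sketch, not part of the imported diff: the tab check above restated in Python; an approximation, since the shell version matches its exclusion patterns anywhere in the path.]

import fnmatch, os

for root, _, files in os.walk('.'):
    for name in fnmatch.filter(files, '*.py'):
        path = os.path.join(root, name)
        if name == '__init__.py' or path.endswith('src/docs/tools/doxypy.py'):
            continue
        if '\t' in open(path).read():
            print(path)
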
diff --git a/src/third_party/wiredtiger/dist/serial.py b/src/third_party/wiredtiger/dist/serial.py
index 6abfa5bc96f..ddadbbdb6be 100644
--- a/src/third_party/wiredtiger/dist/serial.py
+++ b/src/third_party/wiredtiger/dist/serial.py
@@ -4,89 +4,89 @@ import textwrap
from dist import compare_srcfile
class SerialArg:
- def __init__(self, typestr, name, sized=0):
- self.typestr = typestr
- self.name = name
- self.sized = sized
+ def __init__(self, typestr, name, sized=0):
+ self.typestr = typestr
+ self.name = name
+ self.sized = sized
class Serial:
- def __init__(self, name, args):
- self.name = name
- self.args = args
+ def __init__(self, name, args):
+ self.name = name
+ self.args = args
msgtypes = [
Serial('col_append', [
- SerialArg('WT_INSERT_HEAD *', 'ins_head'),
- SerialArg('WT_INSERT ***', 'ins_stack'),
- SerialArg('WT_INSERT *', 'new_ins', 1),
- SerialArg('uint64_t *', 'recnop'),
- SerialArg('u_int', 'skipdepth'),
- ]),
+ SerialArg('WT_INSERT_HEAD *', 'ins_head'),
+ SerialArg('WT_INSERT ***', 'ins_stack'),
+ SerialArg('WT_INSERT *', 'new_ins', 1),
+ SerialArg('uint64_t *', 'recnop'),
+ SerialArg('u_int', 'skipdepth'),
+ ]),
Serial('insert', [
- SerialArg('WT_INSERT_HEAD *', 'ins_head'),
- SerialArg('WT_INSERT ***', 'ins_stack'),
- SerialArg('WT_INSERT *', 'new_ins', 1),
- SerialArg('u_int', 'skipdepth'),
- ]),
+ SerialArg('WT_INSERT_HEAD *', 'ins_head'),
+ SerialArg('WT_INSERT ***', 'ins_stack'),
+ SerialArg('WT_INSERT *', 'new_ins', 1),
+ SerialArg('u_int', 'skipdepth'),
+ ]),
Serial('update', [
- SerialArg('WT_UPDATE **', 'srch_upd'),
- SerialArg('WT_UPDATE *', 'upd', 1),
- ]),
+ SerialArg('WT_UPDATE **', 'srch_upd'),
+ SerialArg('WT_UPDATE *', 'upd', 1),
+ ]),
]
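
[Editor's sketch, not part of the imported diff: what decl() and decl_p() below derive from the 'update' entry above. A sized argument is passed by reference with a companion size, so the generated wrapper can take ownership of the memory.]

upd = SerialArg('WT_UPDATE *', 'upd', 1)
decl(upd)       # -> 'WT_UPDATE *upd'
decl_p(upd)     # -> 'WT_UPDATE **updp'  (paired with 'size_t upd_size')
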
# decl --
-# Return a declaration for the variable.
+# Return a declaration for the variable.
def decl(l):
- o = l.typestr
- if o[-1] != '*':
- o += ' '
- return o + l.name
+ o = l.typestr
+ if o[-1] != '*':
+ o += ' '
+ return o + l.name
# decl_p --
-# Return a declaration for a reference to the variable, which requires
+# Return a declaration for a reference to the variable, which requires
# another level of indirection.
def decl_p(l):
- o = l.typestr
- if o[-1] != '*':
- o += ' '
- return o + '*' + l.name + 'p'
+ o = l.typestr
+ if o[-1] != '*':
+ o += ' '
+ return o + '*' + l.name + 'p'
# output --
-# Create serialized function calls.
+# Create serialized function calls.
def output(entry, f):
- # Function declaration.
- f.write('static inline int\n__wt_' + entry.name + '_serial(\n')
- o = 'WT_SESSION_IMPL *session, WT_PAGE *page'
- for l in entry.args:
- if l.sized:
- o += ', ' + decl_p(l) + ', size_t ' + l.name + '_size'
- else:
- o += ', ' + decl(l)
- o += ')'
- f.write('\n'.join('\t' + l for l in textwrap.wrap(o, 70)))
- f.write('\n{')
-
- # Local variable declarations.
- for l in entry.args:
- if l.sized:
- f.write('''
+ # Function declaration.
+ f.write('static inline int\n__wt_' + entry.name + '_serial(\n')
+ o = 'WT_SESSION_IMPL *session, WT_PAGE *page'
+ for l in entry.args:
+ if l.sized:
+ o += ', ' + decl_p(l) + ', size_t ' + l.name + '_size'
+ else:
+ o += ', ' + decl(l)
+ o += ')'
+ f.write('\n'.join('\t' + l for l in textwrap.wrap(o, 70)))
+ f.write('\n{')
+
+ # Local variable declarations.
+ for l in entry.args:
+ if l.sized:
+ f.write('''
\t''' + decl(l) + ''' = *''' + l.name + '''p;
\tWT_DECL_RET;
\tsize_t incr_mem;
''')
- # Clear memory references we now own.
- for l in entry.args:
- if l.sized:
- f.write('''
+ # Clear memory references we now own.
+ for l in entry.args:
+ if l.sized:
+ f.write('''
\t/* Clear references to memory we now own. */
\t*''' + l.name + '''p = NULL;
''')
- # Check the page write generation hasn't wrapped.
- f.write('''
+ # Check the page write generation hasn't wrapped.
+ f.write('''
\t/*
\t * Check to see if the page's write generation is about to wrap (wildly
\t * unlikely as it implies 4B updates between clean page reconciliations,
@@ -102,43 +102,43 @@ def output(entry, f):
\t WT_RET(__page_write_gen_wrapped_check(page));
''')
- # Call the worker function.
- if entry.name != "update":
- f.write('''
+ # Call the worker function.
+ if entry.name != "update":
+ f.write('''
\t/* Acquire the page's spinlock, call the worker function. */
\tWT_PAGE_LOCK(session, page);''')
- f.write('''
+ f.write('''
\tret = __''' + entry.name + '''_serial_func(
''')
- o = 'session'
- if entry.name == "update":
- o += ', page'
- for l in entry.args:
- o += ', ' + l.name
- o += ');'
- f.write('\n'.join('\t ' + l for l in textwrap.wrap(o, 70)))
-
- if entry.name != "update":
- f.write('''
+ o = 'session'
+ if entry.name == "update":
+ o += ', page'
+ for l in entry.args:
+ o += ', ' + l.name
+ o += ');'
+ f.write('\n'.join('\t ' + l for l in textwrap.wrap(o, 70)))
+
+ if entry.name != "update":
+ f.write('''
\tWT_PAGE_UNLOCK(session, page);''')
- f.write('''
+ f.write('''
\t/* Free unused memory on error. */
\tif (ret != 0) {
''')
- for l in entry.args:
- if not l.sized:
- continue
- f.write(
- '\t\t__wt_free(session, ' + l.name + ');\n')
- f.write('''
+ for l in entry.args:
+ if not l.sized:
+ continue
+ f.write(
+ '\t\t__wt_free(session, ' + l.name + ');\n')
+ f.write('''
\t\treturn (ret);
\t}
''')
- f.write('''
+ f.write('''
\t/*
\t * Increment in-memory footprint after releasing the mutex: that's safe
\t * because the structures we added cannot be discarded while visible to
@@ -147,13 +147,13 @@ def output(entry, f):
\t */
\tincr_mem = 0;
''')
- for l in entry.args:
- if not l.sized:
- continue
- f.write('\tWT_ASSERT(session, ' +
- l.name + '_size != 0);\n')
- f.write('\tincr_mem += ' + l.name + '_size;\n')
- f.write('''\tif (incr_mem != 0)
+ for l in entry.args:
+ if not l.sized:
+ continue
+ f.write('\tWT_ASSERT(session, ' +
+ l.name + '_size != 0);\n')
+ f.write('\tincr_mem += ' + l.name + '_size;\n')
+ f.write('''\tif (incr_mem != 0)
\t\t__wt_cache_page_inmem_incr(session, page, incr_mem);
\t/* Mark the page dirty after updating the footprint. */
@@ -171,19 +171,19 @@ tmp_file = '__tmp'
tfile = open(tmp_file, 'w')
skip = 0
for line in open('../src/include/serial.i', 'r'):
- if not skip:
- tfile.write(line)
- if line.count('Serialization function section: END'):
- tfile.write(line)
- skip = 0
- elif line.count('Serialization function section: BEGIN'):
- tfile.write(' */\n\n')
- skip = 1
-
- for entry in msgtypes:
- output(entry, tfile)
-
- tfile.write('/*\n')
+ if not skip:
+ tfile.write(line)
+ if line.count('Serialization function section: END'):
+ tfile.write(line)
+ skip = 0
+ elif line.count('Serialization function section: BEGIN'):
+ tfile.write(' */\n\n')
+ skip = 1
+
+ for entry in msgtypes:
+ output(entry, tfile)
+
+ tfile.write('/*\n')
tfile.close()
compare_srcfile(tmp_file, '../src/include/serial.i')
diff --git a/src/third_party/wiredtiger/dist/stat.py b/src/third_party/wiredtiger/dist/stat.py
index 5ffcd07e66c..503753ddd01 100644
--- a/src/third_party/wiredtiger/dist/stat.py
+++ b/src/third_party/wiredtiger/dist/stat.py
@@ -8,40 +8,40 @@ from dist import compare_srcfile
from stat_data import groups, dsrc_stats, connection_stats
def print_struct(title, name, base, stats):
- '''Print the structures for the stat.h file.'''
- f.write('/*\n')
- f.write(' * Statistics entries for ' + title + '.\n')
- f.write(' */\n')
- f.write(
- '#define\tWT_' + name.upper() + '_STATS_BASE\t' + str(base) + '\n')
- f.write('struct __wt_' + name + '_stats {\n')
-
- for l in stats:
- f.write('\tWT_STATS ' + l.name + ';\n')
- f.write('};\n\n')
+ '''Print the structures for the stat.h file.'''
+ f.write('/*\n')
+ f.write(' * Statistics entries for ' + title + '.\n')
+ f.write(' */\n')
+ f.write(
+ '#define\tWT_' + name.upper() + '_STATS_BASE\t' + str(base) + '\n')
+ f.write('struct __wt_' + name + '_stats {\n')
+
+ for l in stats:
+ f.write('\tWT_STATS ' + l.name + ';\n')
+ f.write('};\n\n')
# Update the #defines in the stat.h file.
tmp_file = '__tmp'
f = open(tmp_file, 'w')
skip = 0
for line in open('../src/include/stat.h', 'r'):
- if not skip:
- f.write(line)
- if line.count('Statistics section: END'):
- f.write(line)
- skip = 0
- elif line.count('Statistics section: BEGIN'):
- f.write('\n')
- skip = 1
- print_struct(
- 'connections', 'connection', 1000, connection_stats)
- print_struct('data sources', 'dsrc', 2000, dsrc_stats)
+ if not skip:
+ f.write(line)
+ if line.count('Statistics section: END'):
+ f.write(line)
+ skip = 0
+ elif line.count('Statistics section: BEGIN'):
+ f.write('\n')
+ skip = 1
+ print_struct(
+ 'connections', 'connection', 1000, connection_stats)
+ print_struct('data sources', 'dsrc', 2000, dsrc_stats)
f.close()
compare_srcfile(tmp_file, '../src/include/stat.h')
def print_defines():
- '''Print the #defines for the wiredtiger.in file.'''
- f.write('''
+ '''Print the #defines for the wiredtiger.in file.'''
+ f.write('''
/*!
* @name Connection statistics
* @anchor statistics_keys
@@ -52,12 +52,12 @@ def print_defines():
* @{
*/
''')
- for v, l in enumerate(connection_stats, 1000):
- f.write('/*! %s */\n' % '\n * '.join(textwrap.wrap(l.desc, 70)))
- f.write('#define\tWT_STAT_CONN_' + l.name.upper() + "\t" *
- max(1, 6 - int((len('WT_STAT_CONN_' + l.name)) / 8)) +
- str(v) + '\n')
- f.write('''
+ for v, l in enumerate(connection_stats, 1000):
+ f.write('/*! %s */\n' % '\n * '.join(textwrap.wrap(l.desc, 70)))
+ f.write('#define\tWT_STAT_CONN_' + l.name.upper() + "\t" *
+ max(1, 6 - int((len('WT_STAT_CONN_' + l.name)) / 8)) +
+ str(v) + '\n')
+ f.write('''
/*!
* @}
* @name Statistics for data sources
@@ -65,34 +65,34 @@ def print_defines():
* @{
*/
''')
- for v, l in enumerate(dsrc_stats, 2000):
- f.write('/*! %s */\n' % '\n * '.join(textwrap.wrap(l.desc, 70)))
- f.write('#define\tWT_STAT_DSRC_' + l.name.upper() + "\t" *
- max(1, 6 - int((len('WT_STAT_DSRC_' + l.name)) / 8)) +
- str(v) + '\n')
- f.write('/*! @} */\n')
+ for v, l in enumerate(dsrc_stats, 2000):
+ f.write('/*! %s */\n' % '\n * '.join(textwrap.wrap(l.desc, 70)))
+ f.write('#define\tWT_STAT_DSRC_' + l.name.upper() + "\t" *
+ max(1, 6 - int((len('WT_STAT_DSRC_' + l.name)) / 8)) +
+ str(v) + '\n')
+ f.write('/*! @} */\n')
# Update the #defines in the wiredtiger.in file.
tmp_file = '__tmp'
f = open(tmp_file, 'w')
skip = 0
for line in open('../src/include/wiredtiger.in', 'r'):
- if not skip:
- f.write(line)
- if line.count('Statistics section: END'):
- f.write(line)
- skip = 0
- elif line.count('Statistics section: BEGIN'):
- f.write(' */\n')
- skip = 1
- print_defines()
- f.write('/*\n')
+ if not skip:
+ f.write(line)
+ if line.count('Statistics section: END'):
+ f.write(line)
+ skip = 0
+ elif line.count('Statistics section: BEGIN'):
+ f.write(' */\n')
+ skip = 1
+ print_defines()
+ f.write('/*\n')
f.close()
compare_srcfile(tmp_file, '../src/include/wiredtiger.in')
def print_func(name, list):
- '''Print the functions for the stat.c file.'''
- f.write('''
+ '''Print the functions for the stat.c file.'''
+ f.write('''
void
__wt_stat_init_''' + name + '''_stats(WT_''' + name.upper() + '''_STATS *stats)
{
@@ -100,15 +100,15 @@ __wt_stat_init_''' + name + '''_stats(WT_''' + name.upper() + '''_STATS *stats)
\tmemset(stats, 0, sizeof(*stats));
''')
- for l in sorted(list):
- o = '\tstats->' + l.name + '.desc = "' + l.desc + '";\n'
- if len(o) + 7 > 80:
- o = o.replace('= ', '=\n\t ')
- f.write(o)
- f.write('''}
+ for l in sorted(list):
+ o = '\tstats->' + l.name + '.desc = "' + l.desc + '";\n'
+ if len(o) + 7 > 80:
+ o = o.replace('= ', '=\n\t ')
+ f.write(o)
+ f.write('''}
''')
- f.write('''
+ f.write('''
void
__wt_stat_refresh_''' + name + '''_stats(void *stats_arg)
{
@@ -116,17 +116,17 @@ __wt_stat_refresh_''' + name + '''_stats(void *stats_arg)
\tstats = (WT_''' + name.upper() + '''_STATS *)stats_arg;
''')
- for l in sorted(list):
- # no_clear: don't clear the value.
- if not 'no_clear' in l.flags:
- f.write('\tstats->' + l.name + '.v = 0;\n');
- f.write('}\n')
+ for l in sorted(list):
+ # no_clear: don't clear the value.
+ if not 'no_clear' in l.flags:
+ f.write('\tstats->' + l.name + '.v = 0;\n');
+ f.write('}\n')
- # Aggregation is only interesting for data-source statistics.
- if name == 'connection':
- return;
+ # Aggregation is only interesting for data-source statistics.
+ if name == 'connection':
+ return;
- f.write('''
+ f.write('''
void
__wt_stat_aggregate_''' + name +
'''_stats(const void *child, const void *parent)
@@ -136,16 +136,16 @@ __wt_stat_aggregate_''' + name +
\tc = (WT_''' + name.upper() + '''_STATS *)child;
\tp = (WT_''' + name.upper() + '''_STATS *)parent;
''')
- for l in sorted(list):
- if 'no_aggregate' in l.flags:
- continue;
- elif 'max_aggregate' in l.flags:
- o = 'if (c->' + l.name + '.v > p->' + l.name +\
- '.v)\n\t p->' + l.name + '.v = c->' + l.name + '.v;'
- else:
- o = 'p->' + l.name + '.v += c->' + l.name + '.v;'
- f.write('\t' + o + '\n')
- f.write('}\n')
+ for l in sorted(list):
+ if 'no_aggregate' in l.flags:
+ continue;
+ elif 'max_aggregate' in l.flags:
+ o = 'if (c->' + l.name + '.v > p->' + l.name +\
+ '.v)\n\t p->' + l.name + '.v = c->' + l.name + '.v;'
+ else:
+ o = 'p->' + l.name + '.v += c->' + l.name + '.v;'
+ f.write('\t' + o + '\n')
+ f.write('}\n')
# Write the stat initialization and refresh routines to the stat.c file.
f = open(tmp_file, 'w')
@@ -163,23 +163,23 @@ scale_info = 'no_scale_per_second_list = [\n'
clear_info = 'no_clear_list = [\n'
prefix_list = []
for l in sorted(connection_stats):
- prefix_list.append(l.prefix)
- if 'no_scale' in l.flags:
- scale_info += ' \'' + l.desc + '\',\n'
- if 'no_clear' in l.flags:
- clear_info += ' \'' + l.desc + '\',\n'
+ prefix_list.append(l.prefix)
+ if 'no_scale' in l.flags:
+ scale_info += ' \'' + l.desc + '\',\n'
+ if 'no_clear' in l.flags:
+ clear_info += ' \'' + l.desc + '\',\n'
for l in sorted(dsrc_stats):
- prefix_list.append(l.prefix)
- if 'no_scale' in l.flags:
- scale_info += ' \'' + l.desc + '\',\n'
- if 'no_clear' in l.flags:
- clear_info += ' \'' + l.desc + '\',\n'
+ prefix_list.append(l.prefix)
+ if 'no_scale' in l.flags:
+ scale_info += ' \'' + l.desc + '\',\n'
+ if 'no_clear' in l.flags:
+ clear_info += ' \'' + l.desc + '\',\n'
scale_info += ']\n'
clear_info += ']\n'
prefix_info = 'prefix_list = [\n'
# Remove the duplicates and print out the list
for l in list(set(prefix_list)):
- prefix_info += ' \'' + l + '\',\n'
+ prefix_info += ' \'' + l + '\',\n'
prefix_info += ']\n'
group_info = 'groups = ' + str(groups)
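The print_defines() hunks above carry a small piece of tab arithmetic: with 8-column tab stops and the macro name starting one tab in, a name of length n needs max(1, 6 - n//8) trailing tabs to land its value at column 56, so every statistic number in wiredtiger.in lines up regardless of name length (for names shorter than 48 characters). A standalone sketch of that alignment, using made-up macro names rather than entries from stat_data.py:

    # Sketch of the tab alignment behind the generated WT_STAT_* defines.
    # The macro names below are hypothetical, not real statistics.
    def define_line(name, value):
        # One leading tab puts the name at column 8; max(1, 6 - n//8)
        # trailing tabs then carry the value out to column 56 for any
        # name under 48 characters, matching print_defines() above.
        return '#define\t' + name + '\t' * max(1, 6 - len(name) // 8) + str(value)

    for v, name in enumerate(
        ['WT_STAT_CONN_EXAMPLE_SHORT', 'WT_STAT_CONN_EXAMPLE_MUCH_LONGER_NAME'],
        1000):
        print(define_line(name, v))

The same expression appears twice in the hunk, once for the WT_STAT_CONN_ block (numbered from 1000) and once for the WT_STAT_DSRC_ block (numbered from 2000).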
diff --git a/src/third_party/wiredtiger/dist/stat_data.py b/src/third_party/wiredtiger/dist/stat_data.py
index 1e4f1b41cb9..bd628e7418a 100644
--- a/src/third_party/wiredtiger/dist/stat_data.py
+++ b/src/third_party/wiredtiger/dist/stat_data.py
@@ -18,65 +18,65 @@ import sys
class Stat:
def __init__(self, name, tag, desc, flags=''):
- self.name = name
- self.desc = tag + ': ' + desc
- self.flags = flags
+ self.name = name
+ self.desc = tag + ': ' + desc
+ self.flags = flags
def __cmp__(self, other):
- return cmp(self.desc.lower(), other.desc.lower())
+ return cmp(self.desc.lower(), other.desc.lower())
class AsyncStat(Stat):
prefix = 'async'
def __init__(self, name, desc, flags=''):
- Stat.__init__(self, name, AsyncStat.prefix, desc, flags)
+ Stat.__init__(self, name, AsyncStat.prefix, desc, flags)
class BlockStat(Stat):
prefix = 'block-manager'
def __init__(self, name, desc, flags=''):
- Stat.__init__(self, name, BlockStat.prefix, desc, flags)
+ Stat.__init__(self, name, BlockStat.prefix, desc, flags)
class BtreeStat(Stat):
prefix = 'btree'
def __init__(self, name, desc, flags=''):
- Stat.__init__(self, name, BtreeStat.prefix, desc, flags)
+ Stat.__init__(self, name, BtreeStat.prefix, desc, flags)
class CacheStat(Stat):
prefix = 'cache'
def __init__(self, name, desc, flags=''):
- Stat.__init__(self, name, CacheStat.prefix, desc, flags)
+ Stat.__init__(self, name, CacheStat.prefix, desc, flags)
class CompressStat(Stat):
prefix = 'compression'
def __init__(self, name, desc, flags=''):
- Stat.__init__(self, name, CompressStat.prefix, desc, flags)
+ Stat.__init__(self, name, CompressStat.prefix, desc, flags)
class ConnStat(Stat):
prefix = 'connection'
def __init__(self, name, desc, flags=''):
- Stat.__init__(self, name, ConnStat.prefix, desc, flags)
+ Stat.__init__(self, name, ConnStat.prefix, desc, flags)
class CursorStat(Stat):
prefix = 'cursor'
def __init__(self, name, desc, flags=''):
- Stat.__init__(self, name, CursorStat.prefix, desc, flags)
+ Stat.__init__(self, name, CursorStat.prefix, desc, flags)
class DhandleStat(Stat):
prefix = 'data-handle'
def __init__(self, name, desc, flags=''):
- Stat.__init__(self, name, DhandleStat.prefix, desc, flags)
+ Stat.__init__(self, name, DhandleStat.prefix, desc, flags)
class LogStat(Stat):
prefix = 'log'
def __init__(self, name, desc, flags=''):
- Stat.__init__(self, name, LogStat.prefix, desc, flags)
+ Stat.__init__(self, name, LogStat.prefix, desc, flags)
class LSMStat(Stat):
prefix = 'LSM'
def __init__(self, name, desc, flags=''):
- Stat.__init__(self, name, LSMStat.prefix, desc, flags)
+ Stat.__init__(self, name, LSMStat.prefix, desc, flags)
class RecStat(Stat):
prefix = 'reconciliation'
def __init__(self, name, desc, flags=''):
- Stat.__init__(self, name, RecStat.prefix, desc, flags)
+ Stat.__init__(self, name, RecStat.prefix, desc, flags)
class SessionStat(Stat):
prefix = 'session'
def __init__(self, name, desc, flags=''):
- Stat.__init__(self, name, SessionStat.prefix, desc, flags)
+ Stat.__init__(self, name, SessionStat.prefix, desc, flags)
class TxnStat(Stat):
prefix = 'transaction'
def __init__(self, name, desc, flags=''):
- Stat.__init__(self, name, TxnStat.prefix, desc, flags)
+ Stat.__init__(self, name, TxnStat.prefix, desc, flags)
##########################################
# Groupings of useful statistics:
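One detail worth flagging in the hunk above: __cmp__, together with the cmp() builtin it calls, is a Python 2 construct that Python 3 removed, and the sorted(connection_stats) and sorted(dsrc_stats) calls in stat.py depend on it. These dist scripts target Python 2, but a port would need rich comparisons instead; a sketch of the case-insensitive equivalent, illustrative only:

    # Sketch of a Python 3 friendly ordering for Stat, equivalent to the
    # case-insensitive __cmp__ kept by the hunk above. Illustrative only.
    import functools

    @functools.total_ordering
    class Stat(object):
        def __init__(self, name, tag, desc, flags=''):
            self.name = name
            self.desc = tag + ': ' + desc
            self.flags = flags

        def __eq__(self, other):
            return self.desc.lower() == other.desc.lower()

        def __lt__(self, other):
            return self.desc.lower() < other.desc.lower()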
@@ -185,6 +185,8 @@ connection_stats = [
##########################################
# Dhandle statistics
##########################################
+ DhandleStat('dh_conn_handles', 'connection dhandles swept'),
+ DhandleStat('dh_conn_sweeps', 'connection sweeps'),
DhandleStat('dh_session_handles', 'session dhandles swept'),
DhandleStat('dh_session_sweeps', 'session sweep attempts'),
@@ -196,7 +198,15 @@ connection_stats = [
LogStat('log_bytes_payload', 'log bytes of payload data'),
LogStat('log_bytes_written', 'log bytes written'),
LogStat('log_close_yields', 'yields waiting for previous log file close'),
- LogStat('log_max_filesize', 'maximum log file size', 'no_clear'),
+ LogStat('log_compress_len', 'total size of compressed records'),
+ LogStat('log_compress_mem', 'total in-memory size of compressed records'),
+ LogStat('log_compress_small', 'log records too small to compress'),
+ LogStat('log_compress_writes', 'log records compressed'),
+ LogStat('log_compress_write_fails', 'log records not compressed'),
+ LogStat('log_max_filesize', 'maximum log file size', 'no_clear,no_scale'),
+ LogStat('log_prealloc_files', 'pre-allocated log files prepared'),
+ LogStat('log_prealloc_max', 'number of pre-allocated log files to create'),
+ LogStat('log_prealloc_used', 'pre-allocated log files used'),
LogStat('log_reads', 'log read operations'),
LogStat('log_scan_records', 'records processed by log scan'),
LogStat('log_scan_rereads', 'log scan records requiring two reads'),
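The log_max_filesize change above also shows how statistic flags are spelled: a plain comma-separated string ('no_clear,no_scale'), which stat.py consumes with substring tests such as 'no_clear' in l.flags rather than by parsing. A small sketch of the two halves together, using a hypothetical statistic rather than a real stat_data.py entry:

    # Sketch: how a Stat's flags string drives the generated refresh code.
    # 'sample_max' is a hypothetical statistic, not an entry in stat_data.py.
    class Stat(object):
        def __init__(self, name, desc, flags=''):
            self.name, self.desc, self.flags = name, desc, flags

    stats = [
        Stat('sample_reads', 'sample read operations'),
        Stat('sample_max', 'sample maximum value', 'no_clear,no_scale'),
    ]

    # Mirrors the loop in print_func(): statistics carrying 'no_clear'
    # keep their value across a refresh; everything else is reset to
    # zero by the generated __wt_stat_refresh_* function.
    for s in stats:
        if 'no_clear' not in s.flags:
            print('\tstats->' + s.name + '.v = 0;')

Substring matching works only as long as no flag name is contained in another; that property holds for the flags visible in this diff (no_clear, no_scale, no_aggregate, max_aggregate).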