summaryrefslogtreecommitdiff
path: root/src/third_party/wiredtiger/bench/workgen/runner/compress_ratio.py
blob: 8fa92170d7237a725a7cd9460996cff890e74e23 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
#!/usr/bin/env python
#
# Public Domain 2014-2020 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#

# Drive a constant high workload through, even if WiredTiger isn't keeping
# up by dividing the workload across a lot of threads. This needs to be
# tuned to the particular machine so the workload is close to capacity in the
# steady state, but not overwhelming.
#
################
# Note: This looks similar to multi_btree_heavy_stress.py with values altered
# for run time, #ops, #threads, #throttle to maintain dirty cache around the
# eviction target of 5% on the AWS perf machines. These values being machine
# dependent might need to be altered as per the machine this workload gets
# run on.
#
from runner import *
from wiredtiger import *
from workgen import *

def op_append(ops, op):
    """Append op to the accumulated operation chain.

    workgen operations compose with '+='; a None accumulator means no
    operations have been collected yet, so op becomes the chain.
    """
    # PEP 8: compare against None with 'is', not '=='.
    if ops is None:
        ops = op
    else:
        ops += op
    return ops

def make_op(optype, table, key, value = None):
    """Build a workgen Operation.

    Omit the value argument entirely when value is None, since some
    operation types (e.g. reads) take no value.
    """
    # PEP 8: compare against None with 'is', not '=='.
    if value is None:
        return Operation(optype, table, key)
    else:
        return Operation(optype, table, key, value)

logkey = Key(Key.KEYGEN_APPEND, 8)  ## should be 8 bytes format 'Q'
def operations(optype, tables, key, value = None, ops_per_txn = 0, logtable = None):
    """Return a chain of operations of the given type, one per table.

    If logtable is set, mirror each table operation with one against the
    log table (keyed by the module-level logkey).  If ops_per_txn > 0,
    group the operations into transactions of that many operations each;
    a trailing partial group becomes a final, smaller transaction.
    """
    txn_list = []
    ops = None
    nops = 0
    for table in tables:
        ops = op_append(ops, make_op(optype, table, key, value))
        # PEP 8: None comparisons use 'is'/'is not', not '=='/'!='.
        if logtable is not None:
            ops = op_append(ops, make_op(optype, logtable, logkey, value))
        nops += 1
        if ops_per_txn > 0 and nops % ops_per_txn == 0:
            txn_list.append(txn(ops))
            ops = None
    if ops_per_txn > 0:
        # Wrap any remaining partial group, then chain all transactions
        # back into a single operation sequence.
        if ops is not None:
            txn_list.append(txn(ops))
            ops = None
        for t in txn_list:
            ops = op_append(ops, t)
    return ops

context = Context()
# Connection configuration: modest 2GB cache with frequent (20s) checkpoints
# and once-a-second JSON statistics logging so dirty-cache/eviction behavior
# and compression ratios can be tracked over the run.  Logging and sync are
# disabled to keep the workload write-bound on the btrees themselves.
conn_config="create,cache_size=2GB,session_max=1000,eviction=(threads_min=4,threads_max=4),log=(enabled=false),transaction_sync=(enabled=false),checkpoint_sync=false,checkpoint=(wait=20),statistics=(fast),statistics_log=(json,wait=1)"
# Per-table configuration shared by every test table; only the compressor
# (appended from compression_opts below) varies between tables.
table_config="allocation_size=4k,memory_page_max=10MB,prefix_compression=false,split_pct=90,leaf_page_max=32k,internal_page_max=16k,type=file"
# One table is created per entry; the key names the table suffix and the
# value is the extra table-configuration string selecting the compressor.
compression_opts = {
    "none" : "block_compressor=none",
    "lz4" : "block_compressor=lz4",
    "snappy" : "block_compressor=snappy",
    "zlib" : "block_compressor=zlib",
    "zlib_onepage" : "block_compressor=zlib,memory_page_image_max=32k",
    "zlib_tenpage" : "block_compressor=zlib,memory_page_image_max=320k",
    "zstd" : "block_compressor=zstd"
}

# What compressors are available for testing, and the connection configuration
# needed, depends on what compressors have been configured into the WiredTiger
# library linked by workgen.  Any compressors that are explicitly 'built-in'
# to WiredTiger will not need an explicit extension parameter.
#
#conn_config += extensions_config(['compressors/snappy'])

# Open the database and create one table per compression configuration so
# the resulting on-disk sizes (and hence compression ratios) can be compared.
conn = wiredtiger_open("WT_TEST", conn_config)
s = conn.open_session()

tables = []
# BUG FIX: dict.iteritems() exists only in Python 2 and raises
# AttributeError under Python 3; items() behaves correctly in both.
for name_ext, compress_config in compression_opts.items():
    tname = "table:test_" + name_ext
    s.create(tname, 'key_format=S,value_format=S,' + table_config + "," + compress_config)
    table = Table(tname)
    # Partially-compressible values (70%) give each compressor realistic input.
    table.options.value_compressibility = 70
    tables.append(table)

# Populate phase: a single thread bulk-loads 500,000 rounds of appends
# (one insert per table per round) before the timed workload starts.
icount = 500000
populate_ops = operations(Operation.OP_INSERT, tables, Key(Key.KEYGEN_APPEND, 20), Value(500))
populate_thread = Thread(populate_ops * icount)
pop_workload = Workload(context, populate_thread)
print('populate:')
pop_workload.run(conn)

# Steady-state phase: appending inserts plus uniform-random updates, each
# thread throttled to 1000 ops/sec so the aggregate load sits near (but not
# past) the machine's eviction capacity.
ins_ops = operations(Operation.OP_INSERT, tables, Key(Key.KEYGEN_APPEND, 20), Value(500), 0)
upd_ops = operations(Operation.OP_UPDATE, tables, Key(Key.KEYGEN_UNIFORM, 20), Value(500), 0)

ins_thread = Thread(ins_ops)
upd_thread = Thread(upd_ops)
# Options are set on the prototype threads before they are multiplied below.
for worker, label in ((ins_thread, "Insert"), (upd_thread, "Update")):
    worker.options.throttle = 1000
    worker.options.name = label
threads = ins_thread * 2 + upd_thread * 10

workload = Workload(context, threads)
workload.options.run_time = 60
workload.options.report_interval = 1
workload.options.sample_interval = 1
workload.options.sample_rate = 1
print('Update heavy workload:')
workload.run(conn)