author    Keith Bostic <keith@wiredtiger.com>  2013-04-30 19:57:37 -0400
committer Keith Bostic <keith@wiredtiger.com>  2013-04-30 19:57:37 -0400
commit    9213cc4af1d221abf233d35c62bf5e740786f9f7 (patch)
tree      5b8030c196d899106b4c44f1479515017bb8a299 /tools
parent    859482f76151cdc3571a166159464356a7030c93 (diff)
download  mongo-9213cc4af1d221abf233d35c62bf5e740786f9f7.tar.gz
Add support for aggregating data-source statistics, currently only used by
the LSM tree. Add support for automatically updating the list of statistics
that are to be "scaled per second" by statlog.py.
Diffstat (limited to 'tools')
-rw-r--r--  tools/statlog.py  123
1 file changed, 115 insertions(+), 8 deletions(-)
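The patch replaces statlog.py's old substring heuristic ('currently' / 'in the
cache') with an explicit list of statistic names that should be scaled to
per-second rates. A minimal sketch of the membership check, assuming plot
titles take the form "<index> <statistic name>" as implied by the
title.split(' ', 1)[1] call in the diff below (the titles and list entries
here are illustrative, not taken from the patch):

    # Sketch only: decide whether a statistic should be scaled per second.
    scale_per_second_list = [
        'bytes read by the block manager',
        'cursor creation',
    ]

    def is_scaled_per_second(title):
        # Plot titles look like "<index> <statistic name>"; strip the leading
        # index before checking membership in the list.
        name = title.split(' ', 1)[1]
        return name in scale_per_second_list

    print(is_scaled_per_second('3 bytes read by the block manager'))        # True
    print(is_scaled_per_second('7 cache: bytes currently in the cache'))    # False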
diff --git a/tools/statlog.py b/tools/statlog.py
index f6ff4c4412e..4514306ab4c 100644
--- a/tools/statlog.py
+++ b/tools/statlog.py
@@ -33,6 +33,118 @@ from subprocess import call
TIMEFMT = "%b %d %H:%M:%S"
+# scale-per-second list section: BEGIN
+scale_per_second_list = [
+ 'mapped bytes read by the block manager',
+ 'bytes read by the block manager',
+ 'bytes written by the block manager',
+ 'mapped blocks read by the block manager',
+ 'blocks read by the block manager',
+ 'blocks written by the block manager',
+ 'cache: bytes read into cache',
+ 'cache: bytes written from cache',
+ 'cache: checkpoint blocked page eviction',
+ 'cache: unmodified pages evicted',
+ 'cache: modified pages evicted',
+ 'cache: pages selected for eviction unable to be evicted',
+ 'cache: pages queued for forced eviction',
+ 'cache: hazard pointer blocked page eviction',
+ 'cache: internal pages evicted',
+ 'cache: internal page merge operations completed',
+ 'cache: internal page merge attempts that could not complete',
+ 'cache: internal levels merged',
+ 'cache: eviction server unable to reach eviction goal',
+ 'cache: pages walked for eviction',
+ 'cache: pages read into cache',
+ 'cache: pages written from cache',
+ 'pthread mutex condition wait calls',
+ 'cursor creation',
+ 'Btree cursor insert calls',
+ 'Btree cursor next calls',
+ 'Btree cursor prev calls',
+ 'Btree cursor remove calls',
+ 'Btree cursor reset calls',
+ 'Btree cursor search calls',
+ 'Btree cursor search near calls',
+ 'Btree cursor update calls',
+ 'rows merged in an LSM tree',
+ 'total heap memory allocations',
+ 'total heap memory frees',
+ 'total heap memory re-allocations',
+ 'total read I/Os',
+ 'page reconciliation calls',
+ 'page reconciliation calls for eviction',
+ 'reconciliation failed because an update could not be included',
+ 'pthread mutex shared lock read-lock calls',
+ 'pthread mutex shared lock write-lock calls',
+ 'ancient transactions',
+ 'transactions',
+ 'transaction checkpoints',
+ 'transactions committed',
+ 'transaction failures due to cache overflow',
+ 'transactions rolled-back',
+ 'total write I/Os',
+ 'blocks allocated',
+ 'block allocations requiring file extension',
+ 'blocks freed',
+ 'bloom filter false positives',
+ 'bloom filter hits',
+ 'bloom filter misses',
+ 'bloom filter pages evicted from cache',
+ 'bloom filter pages read into cache',
+ 'pages rewritten by compaction',
+ 'bytes read into cache',
+ 'bytes written from cache',
+ 'cache: checkpoint blocked page eviction',
+ 'unmodified pages evicted',
+ 'modified pages evicted',
+ 'data source pages selected for eviction unable to be evicted',
+ 'cache: pages queued for forced eviction',
+ 'cache: hazard pointer blocked page eviction',
+ 'internal pages evicted',
+ 'cache: internal page merge operations completed',
+ 'cache: internal page merge attempts that could not complete',
+ 'cache: internal levels merged',
+ 'pages read into cache',
+ 'overflow pages read into cache',
+ 'pages written from cache',
+ 'raw compression call failed, no additional data available',
+ 'raw compression call failed, additional data available',
+ 'raw compression call succeeded',
+ 'compressed pages read',
+ 'compressed pages written',
+ 'page written failed to compress',
+ 'page written was too small to compress',
+ 'cursor creation',
+ 'cursor insert calls',
+ 'bulk-loaded cursor-insert calls',
+ 'cursor-insert key and value bytes inserted',
+ 'cursor next calls',
+ 'cursor prev calls',
+ 'cursor remove calls',
+ 'cursor-remove key bytes removed',
+ 'cursor reset calls',
+ 'cursor search calls',
+ 'cursor search near calls',
+ 'cursor update calls',
+ 'cursor-update value bytes updated',
+ 'queries that could have benefited from a Bloom filter that did not exist',
+ 'reconciliation dictionary matches',
+ 'reconciliation overflow keys written',
+ 'reconciliation overflow values written',
+ 'reconciliation pages deleted',
+ 'reconciliation pages merged',
+ 'page reconciliation calls',
+ 'page reconciliation calls for eviction',
+ 'reconciliation failed because an update could not be included',
+ 'reconciliation internal pages split',
+ 'reconciliation leaf pages split',
+ 'object compaction',
+ 'update conflicts',
+ 'write generation conflicts',
+]
+# scale-per-second list section: END
+
# Plot a set of entries for a title.
def plot(title, values, num):
# Ignore entries where the value never changes.
@@ -49,18 +161,13 @@ def plot(title, values, num):
print 'building ' + title
ylabel = 'Value'
-
- # Most statistics are operation or event counts that can reasonably be
- # scaled to a value per second, but some (such as the number of bytes in
- # the cache or the number of files currently open) can't. This is our
- # heuristic for distinguishing between them.
- if 'currently' in title or 'in the cache' in title:
- seconds = 1
- else:
+ if title.split(' ', 1)[1] in scale_per_second_list:
t1, v1 = values[1]
seconds = (datetime.strptime(t1, TIMEFMT) -
datetime.strptime(t0, TIMEFMT)).seconds
ylabel += ' per second'
+ else:
+ seconds = 1
# Write the raw data into a file for processing.
of = open("reports/raw/report.%s.raw" % num, "w")
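For statistics that are on the list, the divisor is the sample interval taken
from the first two timestamps in the data, as in the added lines above. A small
self-contained sketch of that calculation, using the same TIMEFMT as
statlog.py (the sample timestamps and values are made up):

    from datetime import datetime

    TIMEFMT = "%b %d %H:%M:%S"

    # Hypothetical (timestamp, value) pairs as statlog.py would collect them.
    values = [('Apr 30 19:57:00', 100), ('Apr 30 19:57:20', 160)]

    t0, _ = values[0]
    t1, _ = values[1]

    # Interval between the first two samples, in seconds; counter statistics
    # are divided by this value to produce per-second rates.
    seconds = (datetime.strptime(t1, TIMEFMT) -
               datetime.strptime(t0, TIMEFMT)).seconds
    print(seconds)  # 20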