author     Jason Kirtland <jek@discorporate.us>   2008-04-03 17:08:08 +0000
committer  Jason Kirtland <jek@discorporate.us>   2008-04-03 17:08:08 +0000
commit     d78f39d0057bbc648a9af31d7bd3ead2895ee178 (patch)
tree       62877dffe613060d8963438f29480693dee9e9bd
parent     ca1ad4cbb9af8a45da550ba07c476f8cac17cd7a (diff)
download   sqlalchemy-d78f39d0057bbc648a9af31d7bd3ead2895ee178.tar.gz
- Experimental: prefer cProfile over hotshot for 2.5+
- The latest skirmish in the battle against zoomark and sanity: third-party code is factored out of the function call count canary tests
-rw-r--r--  test/profiling/compiler.py    6
-rw-r--r--  test/profiling/pool.py       17
-rw-r--r--  test/profiling/zoomark.py    94
-rw-r--r--  test/testlib/compat.py       16
-rw-r--r--  test/testlib/engines.py      93
-rw-r--r--  test/testlib/profiling.py    91
6 files changed, 251 insertions, 66 deletions
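
The "function call count canary tests" mentioned in the commit message pin a profiled test to an expected number of function calls, with per-Python-version overrides; the run fails if the measured total strays more than the decorator's variance (5% by default) from that figure. A usage sketch, lifted from the compiler tests in this commit:

    from testlib import profiling

    # Expect roughly 74 calls on Python 2.5+ (cProfile), 44 on 2.3 and
    # 42 on 2.4; deviance = int(count * variance) bounds the tolerated drift.
    @profiling.function_call_count(74, {'2.3': 44, '2.4': 42})
    def test_insert(self):
        t1.insert().compile()
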
diff --git a/test/profiling/compiler.py b/test/profiling/compiler.py
index 7062a7e29..4e1111aa2 100644
--- a/test/profiling/compiler.py
+++ b/test/profiling/compiler.py
@@ -15,15 +15,15 @@ class CompileTest(TestBase, AssertsExecutionResults):
Column('c1', Integer, primary_key=True),
Column('c2', String(30)))
- @profiling.function_call_count(42, {'2.3': 44})
+ @profiling.function_call_count(74, {'2.3': 44, '2.4': 42})
def test_insert(self):
t1.insert().compile()
- @profiling.function_call_count(42, {'2.3': 47})
+ @profiling.function_call_count(75, {'2.3': 47, '2.4': 42})
def test_update(self):
t1.update().compile()
- @profiling.function_call_count(120, versions={'2.3': 153, '2.4':116})
+ @profiling.function_call_count(228, versions={'2.3': 153, '2.4':116})
def test_select(self):
s = select([t1], t1.c.c2==t2.c.c1)
s.compile()
diff --git a/test/profiling/pool.py b/test/profiling/pool.py
index a8167a1ba..12b52e610 100644
--- a/test/profiling/pool.py
+++ b/test/profiling/pool.py
@@ -2,12 +2,18 @@ import testenv; testenv.configure_for_tests()
from sqlalchemy import *
from testlib import *
from sqlalchemy.pool import QueuePool
-from sqlalchemy.databases import sqlite
+
class QueuePoolTest(TestBase, AssertsExecutionResults):
+ class Connection(object):
+ def close(self):
+ pass
+
def setUp(self):
global pool
- pool = QueuePool(creator = lambda: sqlite.SQLiteDialect.dbapi().connect(':memory:'), pool_size = 3, max_overflow = -1, use_threadlocal = True)
+ pool = QueuePool(creator=lambda: self.Connection,
+ pool_size=3, max_overflow=-1,
+ use_threadlocal=True)
# the WeakValueDictionary used for the pool's "threadlocal" idea adds 1-6
# method calls to each of these. however its just a lot easier stability
@@ -16,7 +22,7 @@ class QueuePoolTest(TestBase, AssertsExecutionResults):
# and though the solution there is simple, it still doesn't solve the
# issue of "dead" weakrefs sitting in the dict taking up space
- @profiling.profiled('pooltest_connect', call_range=(40, 50), always=True)
+ @profiling.function_call_count(63, {'2.3': 42, '2.4': 43})
def test_first_connect(self):
conn = pool.connect()
@@ -24,7 +30,7 @@ class QueuePoolTest(TestBase, AssertsExecutionResults):
conn = pool.connect()
conn.close()
- @profiling.profiled('pooltest_second_connect', call_range=(24, 24), always=True)
+ @profiling.function_call_count(39, {'2.3': 26, '2.4': 26})
def go():
conn2 = pool.connect()
return conn2
@@ -33,10 +39,11 @@ class QueuePoolTest(TestBase, AssertsExecutionResults):
def test_second_samethread_connect(self):
conn = pool.connect()
- @profiling.profiled('pooltest_samethread_connect', call_range=(4, 4), always=True)
+ @profiling.function_call_count(7, {'2.3': 4, '2.4': 4})
def go():
return pool.connect()
c2 = go()
+
if __name__ == '__main__':
testenv.main()
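
The pool change above swaps the real SQLite DB-API connection for a no-op stand-in class, so the canary counts measure only QueuePool's own overhead rather than driver activity. A minimal self-contained sketch of the same idea (names are illustrative; unlike the test above, the creator here returns a fresh instance per call, the conventional contract for a pool creator):

    from sqlalchemy.pool import QueuePool

    class FakeConnection(object):
        # stands in for a DB-API connection; close() is all the pool needs here
        def close(self):
            pass

    pool = QueuePool(creator=lambda: FakeConnection(),
                     pool_size=3, max_overflow=-1,
                     use_threadlocal=True)

    conn = pool.connect()   # the first checkout is the costliest profiled path
    conn.close()            # returns the connection to the pool
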
diff --git a/test/profiling/zoomark.py b/test/profiling/zoomark.py
index 1606b8103..2f75c6d01 100644
--- a/test/profiling/zoomark.py
+++ b/test/profiling/zoomark.py
@@ -6,33 +6,40 @@ An adaptation of Robert Brewers' ZooMark speed tests.
import datetime
import sys
import time
+import unittest
import testenv; testenv.configure_for_tests()
from sqlalchemy import *
from testlib import *
ITERATIONS = 1
-# Use with conditional_call_count() if VM state of a full-suite run
-# throws off profiling numbers.
-_run_type = 'suite'
-_running_in = lambda: _run_type
+dbapi_session = engines.ReplayableSession()
+metadata = None
-class ZooMarkTest(testing.TestBase, AssertsExecutionResults):
+class ZooMarkTest(TestBase):
"""Runs the ZooMark and squawks if method counts vary from the norm.
Each test has an associated `call_range`, the total number of accepted
function calls made during the test. The count can vary between Python
2.4 and 2.5.
+
+    Unlike a unit test, this is an ordered collection of steps. Running
+ components individually will fail.
+
"""
__only_on__ = 'postgres'
__skip_if__ = ((lambda: sys.version_info < (2, 4)), )
- @profiling.function_call_count(2028)
- def test_1_create_tables(self):
+ def test_baseline_0_setup(self):
global metadata
- metadata = MetaData(testing.db)
+ creator = testing.db.pool._creator
+ recorder = lambda: dbapi_session.recorder(creator())
+ engine = engines.testing_engine(options={'creator':recorder})
+ metadata = MetaData(engine)
+
+ def test_baseline_1_create_tables(self):
Zoo = Table('Zoo', metadata,
Column('ID', Integer, Sequence('zoo_id_seq'),
primary_key=True, index=True),
@@ -59,8 +66,7 @@ class ZooMarkTest(testing.TestBase, AssertsExecutionResults):
)
metadata.create_all()
- @profiling.function_call_count(3635)
- def test_1a_populate(self):
+ def test_baseline_1a_populate(self):
Zoo = metadata.tables['Zoo']
Animal = metadata.tables['Animal']
@@ -126,15 +132,13 @@ class ZooMarkTest(testing.TestBase, AssertsExecutionResults):
Animal.insert().execute(Species=u'Ape', Name=u'Hua Mei', Legs=2,
MotherID=bai_yun)
- @profiling.function_call_count(195)
- def test_2_insert(self):
+ def test_baseline_2_insert(self):
Animal = metadata.tables['Animal']
i = Animal.insert()
for x in xrange(ITERATIONS):
tick = i.execute(Species=u'Tick', Name=u'Tick %d' % x, Legs=8)
- @profiling.function_call_count(2740, {'2.4':2764})
- def test_3_properties(self):
+ def test_baseline_3_properties(self):
Zoo = metadata.tables['Zoo']
Animal = metadata.tables['Animal']
@@ -155,8 +159,7 @@ class ZooMarkTest(testing.TestBase, AssertsExecutionResults):
millipede = fullobject(Animal.select(Animal.c.Legs==1000000))
ticks = fullobject(Animal.select(Animal.c.Species==u'Tick'))
- @profiling.function_call_count(10384, {'2.4': 11177})
- def test_4_expressions(self):
+ def test_baseline_4_expressions(self):
Zoo = metadata.tables['Zoo']
Animal = metadata.tables['Animal']
@@ -208,8 +211,7 @@ class ZooMarkTest(testing.TestBase, AssertsExecutionResults):
assert len(fulltable(Animal.select(func.date_part('month', Animal.c.LastEscape) == 12))) == 1
assert len(fulltable(Animal.select(func.date_part('day', Animal.c.LastEscape) == 21))) == 1
- @profiling.function_call_count(1046, {'2.4': 1088})
- def test_5_aggregates(self):
+ def test_baseline_5_aggregates(self):
Animal = metadata.tables['Animal']
Zoo = metadata.tables['Zoo']
@@ -249,8 +251,7 @@ class ZooMarkTest(testing.TestBase, AssertsExecutionResults):
select([Animal.c.Legs], distinct=True).execute().fetchall()]
legs.sort()
- @profiling.function_call_count(1116, {'2.4':1123})
- def test_6_editing(self):
+ def test_baseline_6_editing(self):
Zoo = metadata.tables['Zoo']
for x in xrange(ITERATIONS):
@@ -277,8 +278,7 @@ class ZooMarkTest(testing.TestBase, AssertsExecutionResults):
SDZ = Zoo.select(Zoo.c.Name==u'San Diego Zoo').execute().fetchone()
assert SDZ['Founded'] == datetime.date(1935, 9, 13)
- @profiling.function_call_count(2139, {'2.4': 2198})
- def test_7_multiview(self):
+ def test_baseline_7_multiview(self):
Zoo = metadata.tables['Zoo']
Animal = metadata.tables['Animal']
@@ -306,10 +306,56 @@ class ZooMarkTest(testing.TestBase, AssertsExecutionResults):
e = fulltable(select([Zoo.c.Name, Animal.c.Species],
from_obj=[outerjoin(Animal, Zoo)]))
- def test_8_drop(self):
+ def test_baseline_8_drop(self):
metadata.drop_all()
+ # Now, run all of these tests again with the DB-API driver factored out:
+ # the ReplayableSession playback stands in for the database.
+
+ # How awkward is this in a unittest framework? Very.
+
+ def test_profile_0(self):
+ global metadata
+
+ player = lambda: dbapi_session.player()
+ engine = create_engine('postgres:///', creator=player)
+ metadata = MetaData(engine)
+
+ @profiling.function_call_count(3230, {'2.4': 1796})
+ def test_profile_1_create_tables(self):
+ self.test_baseline_1_create_tables()
+
+ @profiling.function_call_count(6064, {'2.4': 3635})
+ def test_profile_1a_populate(self):
+ self.test_baseline_1a_populate()
+
+ @profiling.function_call_count(339, {'2.4': 195})
+ def test_profile_2_insert(self):
+ self.test_baseline_2_insert()
+
+ @profiling.function_call_count(4923, {'2.4': 2557})
+ def test_profile_3_properties(self):
+ self.test_baseline_3_properties()
+
+ @profiling.function_call_count(18119, {'2.4': 10549})
+ def test_profile_4_expressions(self):
+ self.test_baseline_4_expressions()
+
+ @profiling.function_call_count(1617, {'2.4': 1032})
+ def test_profile_5_aggregates(self):
+ self.test_baseline_5_aggregates()
+
+ @profiling.function_call_count(1988, {'2.4': 1048})
+ def test_profile_6_editing(self):
+ self.test_baseline_6_editing()
+
+ @profiling.function_call_count(3614, {'2.4': 2198})
+ def test_profile_7_multiview(self):
+ self.test_baseline_7_multiview()
+
+ def test_profile_8_drop(self):
+ self.test_baseline_8_drop()
+
if __name__ == '__main__':
- _run_type = 'isolation'
testenv.main()
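
The reworked ZooMark now runs each scenario twice: the test_baseline_* steps hit a real Postgres database while a ReplayableSession records every DB-API interaction, and the test_profile_* steps replay that recording through a stand-in engine so the canary counts exclude driver and network work entirely. A condensed sketch of the two phases (variable names are illustrative; assumes the suite's testlib package and a configured test database, as in the suite itself):

    from sqlalchemy import create_engine, MetaData
    from testlib import engines, testing

    dbapi_session = engines.ReplayableSession()

    # Phase 1: record. Wrap the real DB-API connection in a Recorder.
    creator = testing.db.pool._creator
    recorder = lambda: dbapi_session.recorder(creator())
    recording_engine = engines.testing_engine(options={'creator': recorder})
    metadata = MetaData(recording_engine)
    # ... run the baseline steps against recording_engine ...

    # Phase 2: replay. The Player answers from the recorded buffer, so no
    # real database is touched while the call counts are taken.
    player = lambda: dbapi_session.player()
    replay_engine = create_engine('postgres:///', creator=player)
    metadata = MetaData(replay_engine)
    # ... rerun the same steps under @profiling.function_call_count ...
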
diff --git a/test/testlib/compat.py b/test/testlib/compat.py
index 4f2006afd..ba12b78ac 100644
--- a/test/testlib/compat.py
+++ b/test/testlib/compat.py
@@ -1,6 +1,6 @@
import itertools, new, sys, warnings
-__all__ = 'set', 'frozenset', 'sorted', '_function_named'
+__all__ = 'set', 'frozenset', 'sorted', '_function_named', 'deque'
try:
set = set
@@ -68,6 +68,20 @@ except NameError:
l.sort()
return l
+try:
+ from collections import deque
+except ImportError:
+ class deque(list):
+ def appendleft(self, x):
+ self.insert(0, x)
+ def popleft(self):
+ return self.pop(0)
+ def extendleft(self, iterable):
+ items = list(iterable)
+ items.reverse()
+ for x in items:
+ self.insert(0, x)
+
def _function_named(fn, newname):
try:
fn.__name__ = newname
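
collections.deque arrived in Python 2.4, so compat.py grows a minimal list-based substitute for 2.3; the ReplayableSession buffer only needs append on one end and popleft on the other. An illustrative round trip, matching how the record and playback phases drive the buffer:

    buffer = deque()              # testlib.compat.deque; collections.deque on 2.4+
    buffer.append('first')        # recording appends results in call order
    buffer.append('second')
    assert buffer.popleft() == 'first'     # playback consumes them FIFO
    assert buffer.popleft() == 'second'
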
diff --git a/test/testlib/engines.py b/test/testlib/engines.py
index 8cb321597..f5694df57 100644
--- a/test/testlib/engines.py
+++ b/test/testlib/engines.py
@@ -1,4 +1,4 @@
-import sys, weakref
+import sys, types, weakref
from testlib import config
from testlib.compat import *
@@ -131,3 +131,94 @@ def utf8_engine(url=None, options=None):
url = str(url)
return testing_engine(url, options)
+
+
+class ReplayableSession(object):
+ """A simple record/playback tool.
+
+ This is *not* a mock testing class. It only records a session for later
+ playback and makes no assertions on call consistency whatsoever. It's
+ unlikely to be suitable for anything other than DB-API recording.
+
+ """
+
+ Callable = object()
+ NoAttribute = object()
+ Natives = set([getattr(types, t)
+ for t in dir(types) if not t.startswith('_')]). \
+ difference([getattr(types, t)
+ for t in ('FunctionType', 'BuiltinFunctionType',
+ 'MethodType', 'BuiltinMethodType',
+ 'LambdaType', 'UnboundMethodType',)])
+ def __init__(self):
+ self.buffer = deque()
+
+ def recorder(self, base):
+ return self.Recorder(self.buffer, base)
+
+ def player(self):
+ return self.Player(self.buffer)
+
+ class Recorder(object):
+ def __init__(self, buffer, subject):
+ self._buffer = buffer
+ self._subject = subject
+
+ def __call__(self, *args, **kw):
+ subject, buffer = [object.__getattribute__(self, x)
+ for x in ('_subject', '_buffer')]
+
+ result = subject(*args, **kw)
+ if type(result) not in ReplayableSession.Natives:
+ buffer.append(ReplayableSession.Callable)
+ return type(self)(buffer, result)
+ else:
+ buffer.append(result)
+ return result
+
+ def __getattribute__(self, key):
+ try:
+ return object.__getattribute__(self, key)
+ except AttributeError:
+ pass
+
+ subject, buffer = [object.__getattribute__(self, x)
+ for x in ('_subject', '_buffer')]
+ try:
+ result = type(subject).__getattribute__(subject, key)
+ except AttributeError:
+ buffer.append(ReplayableSession.NoAttribute)
+ raise
+ else:
+ if type(result) not in ReplayableSession.Natives:
+ buffer.append(ReplayableSession.Callable)
+ return type(self)(buffer, result)
+ else:
+ buffer.append(result)
+ return result
+
+ class Player(object):
+ def __init__(self, buffer):
+ self._buffer = buffer
+
+ def __call__(self, *args, **kw):
+ buffer = object.__getattribute__(self, '_buffer')
+ result = buffer.popleft()
+ if result is ReplayableSession.Callable:
+ return self
+ else:
+ return result
+
+ def __getattribute__(self, key):
+ try:
+ return object.__getattribute__(self, key)
+ except AttributeError:
+ pass
+ buffer = object.__getattribute__(self, '_buffer')
+ result = buffer.popleft()
+ if result is ReplayableSession.Callable:
+ return self
+ elif result is ReplayableSession.NoAttribute:
+ raise AttributeError(key)
+ else:
+ return result
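
ReplayableSession works by proxying: the Recorder forwards every attribute access and call to the real subject, appending either the plain result (when its type is one of the natives from the types module) or a Callable marker (for anything it keeps wrapping) to a shared deque, and the Player later answers the identical sequence of accesses purely from that buffer. A tiny illustration, assuming the suite's testlib package is importable:

    from testlib.engines import ReplayableSession

    class Conn(object):
        # stands in for a DB-API connection/cursor pair
        def cursor(self):
            return self
        def fetchone(self):
            return (1, 'hello')

    session = ReplayableSession()

    # Record: calls pass through to the real object; results land in the buffer.
    rec = session.recorder(Conn())
    assert rec.cursor().fetchone() == (1, 'hello')

    # Replay: the Player never touches Conn; it answers from the buffer alone.
    play = session.player()
    assert play.cursor().fetchone() == (1, 'hello')
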
diff --git a/test/testlib/profiling.py b/test/testlib/profiling.py
index 54a96db47..edaeabdad 100644
--- a/test/testlib/profiling.py
+++ b/test/testlib/profiling.py
@@ -12,6 +12,7 @@ profile_config = { 'targets': set(),
'report': True,
'sort': ('time', 'calls'),
'limit': None }
+profiler = None
def profiled(target=None, **target_opts):
"""Optional function profiling.
@@ -42,19 +43,12 @@ def profiled(target=None, **target_opts):
not target_opts.get('always', None)):
return fn(*args, **kw)
- prof = hotshot.Profile(filename)
- began = time.time()
- prof.start()
- try:
- result = fn(*args, **kw)
- finally:
- prof.stop()
- ended = time.time()
- prof.close()
+ elapsed, load_stats, result = _profile(
+ filename, fn, *args, **kw)
if not testlib.config.options.quiet:
print "Profiled target '%s', wall time: %.2f seconds" % (
- target, ended - began)
+ target, elapsed)
report = target_opts.get('report', profile_config['report'])
if report and testlib.config.options.verbose:
@@ -63,20 +57,13 @@ def profiled(target=None, **target_opts):
print "Profile report for target '%s' (%s)" % (
target, filename)
- stats = hotshot.stats.load(filename)
+ stats = load_stats()
stats.sort_stats(*sort_)
if limit:
stats.print_stats(limit)
else:
stats.print_stats()
- assert_range = target_opts.get('call_range')
- if assert_range:
- if isinstance(assert_range, dict):
- assert_range = assert_range.get(testlib.config.db, 'default')
- stats = hotshot.stats.load(filename)
- assert stats.total_calls >= assert_range[0] and stats.total_calls <= assert_range[1], stats.total_calls
-
os.unlink(filename)
return result
return _function_named(profiled, fn.__name__)
@@ -118,23 +105,21 @@ def function_call_count(count=None, versions={}, variance=0.05):
if count is None:
return lambda fn: fn
- import hotshot, hotshot.stats
-
def decorator(fn):
def counted(*args, **kw):
try:
filename = "%s.prof" % fn.__name__
- prof = hotshot.Profile(filename)
- prof.start()
- try:
- result = fn(*args, **kw)
- finally:
- prof.stop()
- prof.close()
+ elapsed, stat_loader, result = _profile(
+ filename, fn, *args, **kw)
- stats = hotshot.stats.load(filename)
+ stats = stat_loader()
calls = stats.total_calls
+
+ if testlib.config.options.verbose:
+ stats.sort_stats('calls', 'cumulative')
+ stats.print_stats()
+
deviance = int(count * variance)
if (calls < (count - deviance) or
calls > (count + deviance)):
@@ -143,10 +128,6 @@ def function_call_count(count=None, versions={}, variance=0.05):
"of expected %s. (Python version %s)" % (
calls, (variance * 100), count, py_version))
- if testlib.config.options.verbose:
- stats.sort_stats('calls', 'cumulative')
- stats.print_stats()
-
return result
finally:
if os.path.exists(filename):
@@ -179,3 +160,49 @@ def conditional_call_count(discriminator, categories):
return rewrapped(*args, **kw)
return _function_named(at_runtime, fn.__name__)
return decorator
+
+
+def _profile(filename, fn, *args, **kw):
+ global profiler
+ if not profiler:
+ try:
+ import cProfile
+ profiler = 'cProfile'
+ except ImportError:
+ profiler = 'hotshot'
+
+ if profiler == 'cProfile':
+ return _profile_cProfile(filename, fn, *args, **kw)
+ else:
+ return _profile_hotshot(filename, fn, *args, **kw)
+
+def _profile_cProfile(filename, fn, *args, **kw):
+ import cProfile, gc, pstats, time
+
+ load_stats = lambda: pstats.Stats(filename)
+ gc.collect()
+
+ began = time.time()
+ cProfile.runctx('result = fn(*args, **kw)', globals(), locals(),
+ filename=filename)
+ ended = time.time()
+
+ return ended - began, load_stats, locals()['result']
+
+def _profile_hotshot(filename, fn, *args, **kw):
+ import gc, hotshot, hotshot.stats, time
+ load_stats = lambda: hotshot.stats.load(filename)
+
+ gc.collect()
+ prof = hotshot.Profile(filename)
+ began = time.time()
+ prof.start()
+ try:
+ result = fn(*args, **kw)
+ finally:
+ prof.stop()
+ ended = time.time()
+ prof.close()
+
+ return ended - began, load_stats, result
+
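
Both decorators now funnel through the new _profile helper, which prefers cProfile when it can be imported (Python 2.5+) and falls back to hotshot otherwise, returning the wall time, a deferred stats loader, and the function's own return value in either case. A short sketch of how a caller consumes that triple, in the same way function_call_count does above (the profiled function and filename are illustrative):

    from testlib.profiling import _profile

    def fn(x, y):
        return x + y

    elapsed, stat_loader, result = _profile('demo.prof', fn, 2, 3)
    stats = stat_loader()       # pstats.Stats or hotshot.stats.load, as appropriate
    print "%d calls in %.4f sec, result=%r" % (
        stats.total_calls, elapsed, result)
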