author    Mike Bayer <mike_mp@zzzcomputing.com>    2014-08-16 19:49:07 -0400
committer Mike Bayer <mike_mp@zzzcomputing.com>    2014-08-16 19:49:07 -0400
commit    b577afcb2bdcd94581606bc911968d8885509769 (patch)
tree      8ee4e1456bdcc84bd6cf6e25dda51e9338770150 /lib/sqlalchemy/testing/profiling.py
parent    589f205d53f031ceb297af760f2acfc777a5bc5d (diff)
download  sqlalchemy-b577afcb2bdcd94581606bc911968d8885509769.tar.gz
- rework profiling, zoomark tests into single tests so that
they can be used under xdist
Diffstat (limited to 'lib/sqlalchemy/testing/profiling.py')
-rw-r--r--    lib/sqlalchemy/testing/profiling.py    216
1 file changed, 78 insertions(+), 138 deletions(-)
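
For context, the sketch below shows how a callcount test is typically written against this module once the rework is in place. It is a hypothetical illustration, not code from this commit: the test class, method name, workload, and variance value are assumed, and it presumes the test plugin has already set up the per-environment profiles file described in the header text further down.

    # Hypothetical usage sketch (not from this commit): a callcount test
    # built on the function_call_count decorator kept by this change.
    from sqlalchemy.testing import profiling

    class ACallCountTest(object):
        @profiling.function_call_count(variance=0.05)
        def test_build_tuples(self):
            # The decorated body runs under cProfile; its total call count
            # is compared to the value recorded in the profiles file for
            # this environment, or written out when --write-profiles is
            # passed.
            return [tuple(range(10)) for _ in range(100)]
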
diff --git a/lib/sqlalchemy/testing/profiling.py b/lib/sqlalchemy/testing/profiling.py
index 75baec987..fcb888f86 100644
--- a/lib/sqlalchemy/testing/profiling.py
+++ b/lib/sqlalchemy/testing/profiling.py
@@ -14,13 +14,12 @@ in a more fine-grained way than nose's profiling plugin.
import os
import sys
-from .util import gc_collect, decorator
+from .util import gc_collect
from . import config
from .plugin.plugin_base import SkipTest
import pstats
-import time
import collections
-from .. import util
+import contextlib
try:
import cProfile
@@ -30,64 +29,8 @@ from ..util import jython, pypy, win32, update_wrapper
_current_test = None
-
-def profiled(target=None, **target_opts):
- """Function profiling.
-
- @profiled()
- or
- @profiled(report=True, sort=('calls',), limit=20)
-
- Outputs profiling info for a decorated function.
-
- """
-
- profile_config = {'targets': set(),
- 'report': True,
- 'print_callers': False,
- 'print_callees': False,
- 'graphic': False,
- 'sort': ('time', 'calls'),
- 'limit': None}
- if target is None:
- target = 'anonymous_target'
-
- @decorator
- def decorate(fn, *args, **kw):
- elapsed, load_stats, result = _profile(
- fn, *args, **kw)
-
- graphic = target_opts.get('graphic', profile_config['graphic'])
- if graphic:
- os.system("runsnake %s" % filename)
- else:
- report = target_opts.get('report', profile_config['report'])
- if report:
- sort_ = target_opts.get('sort', profile_config['sort'])
- limit = target_opts.get('limit', profile_config['limit'])
- print(("Profile report for target '%s'" % (
- target, )
- ))
-
- stats = load_stats()
- stats.sort_stats(*sort_)
- if limit:
- stats.print_stats(limit)
- else:
- stats.print_stats()
-
- print_callers = target_opts.get(
- 'print_callers', profile_config['print_callers'])
- if print_callers:
- stats.print_callers()
-
- print_callees = target_opts.get(
- 'print_callees', profile_config['print_callees'])
- if print_callees:
- stats.print_callees()
-
- return result
- return decorate
+# ProfileStatsFile instance, set up in plugin_base
+_profile_stats = None
class ProfileStatsFile(object):
@@ -177,20 +120,23 @@ class ProfileStatsFile(object):
self._write()
def _header(self):
- return \
- "# %s\n"\
- "# This file is written out on a per-environment basis.\n"\
- "# For each test in aaa_profiling, the corresponding function and \n"\
- "# environment is located within this file. If it doesn't exist,\n"\
- "# the test is skipped.\n"\
- "# If a callcount does exist, it is compared to what we received. \n"\
- "# assertions are raised if the counts do not match.\n"\
- "# \n"\
- "# To add a new callcount test, apply the function_call_count \n"\
- "# decorator and re-run the tests using the --write-profiles \n"\
- "# option - this file will be rewritten including the new count.\n"\
- "# \n"\
- "" % (self.fname)
+ return (
+ "# %s\n"
+ "# This file is written out on a per-environment basis.\n"
+ "# For each test in aaa_profiling, the corresponding "
+ "function and \n"
+ "# environment is located within this file. "
+ "If it doesn't exist,\n"
+ "# the test is skipped.\n"
+ "# If a callcount does exist, it is compared "
+ "to what we received. \n"
+ "# assertions are raised if the counts do not match.\n"
+ "# \n"
+ "# To add a new callcount test, apply the function_call_count \n"
+ "# decorator and re-run the tests using the --write-profiles \n"
+ "# option - this file will be rewritten including the new count.\n"
+ "# \n"
+ ) % (self.fname)
def _read(self):
try:
@@ -239,72 +185,66 @@ def function_call_count(variance=0.05):
def decorate(fn):
def wrap(*args, **kw):
-
- if cProfile is None:
- raise SkipTest("cProfile is not installed")
-
- if not _profile_stats.has_stats() and not _profile_stats.write:
- # run the function anyway, to support dependent tests
- # (not a great idea but we have these in test_zoomark)
- fn(*args, **kw)
- raise SkipTest("No profiling stats available on this "
- "platform for this function. Run tests with "
- "--write-profiles to add statistics to %s for "
- "this platform." % _profile_stats.short_fname)
-
- gc_collect()
-
- timespent, load_stats, fn_result = _profile(
- fn, *args, **kw
- )
- stats = load_stats()
- callcount = stats.total_calls
-
- expected = _profile_stats.result(callcount)
- if expected is None:
- expected_count = None
- else:
- line_no, expected_count = expected
-
- print(("Pstats calls: %d Expected %s" % (
- callcount,
- expected_count
- )
- ))
- stats.print_stats()
- # stats.print_callers()
-
- if expected_count:
- deviance = int(callcount * variance)
- failed = abs(callcount - expected_count) > deviance
-
- if failed:
- if _profile_stats.write:
- _profile_stats.replace(callcount)
- else:
- raise AssertionError(
- "Adjusted function call count %s not within %s%% "
- "of expected %s. Rerun with --write-profiles to "
- "regenerate this callcount."
- % (
- callcount, (variance * 100),
- expected_count))
- return fn_result
+ with count_functions(variance=variance):
+ return fn(*args, **kw)
return update_wrapper(wrap, fn)
return decorate
-def _profile(fn, *args, **kw):
- filename = "%s.prof" % fn.__name__
-
- def load_stats():
- st = pstats.Stats(filename)
- os.unlink(filename)
- return st
+@contextlib.contextmanager
+def count_functions(variance=0.05):
+ if cProfile is None:
+ raise SkipTest("cProfile is not installed")
+
+ if not _profile_stats.has_stats() and not _profile_stats.write:
+ raise SkipTest("No profiling stats available on this "
+ "platform for this function. Run tests with "
+ "--write-profiles to add statistics to %s for "
+ "this platform." % _profile_stats.short_fname)
+
+ gc_collect()
+
+ pr = cProfile.Profile()
+ pr.enable()
+ #began = time.time()
+ yield
+ #ended = time.time()
+ pr.disable()
+
+ #s = compat.StringIO()
+ stats = pstats.Stats(pr, stream=sys.stdout)
+
+ #timespent = ended - began
+ callcount = stats.total_calls
+
+ expected = _profile_stats.result(callcount)
+ if expected is None:
+ expected_count = None
+ else:
+ line_no, expected_count = expected
+
+ print(("Pstats calls: %d Expected %s" % (
+ callcount,
+ expected_count
+ )
+ ))
+ stats.sort_stats("cumulative")
+ stats.print_stats()
+
+ if expected_count:
+ deviance = int(callcount * variance)
+ failed = abs(callcount - expected_count) > deviance
+
+ if failed:
+ if _profile_stats.write:
+ _profile_stats.replace(callcount)
+ else:
+ raise AssertionError(
+ "Adjusted function call count %s not within %s%% "
+ "of expected %s. Rerun with --write-profiles to "
+ "regenerate this callcount."
+ % (
+ callcount, (variance * 100),
+ expected_count))
- began = time.time()
- cProfile.runctx('result = fn(*args, **kw)', globals(), locals(),
- filename=filename)
- ended = time.time()
- return ended - began, load_stats, locals()['result']
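
As a follow-up illustration, the new count_functions context manager introduced above can also wrap an arbitrary block directly rather than going through the decorator. This is a minimal hypothetical sketch, not code from this commit; it assumes plugin_base has already assigned profiling._profile_stats for the current environment, and the workload and variance value are illustrative.

    # Hypothetical sketch (assumes the plugin has set
    # profiling._profile_stats): measure a block with the context manager
    # added by this commit.
    from sqlalchemy.testing import profiling

    def run_profiled_block():
        with profiling.count_functions(variance=0.10):
            # Everything inside the block runs under cProfile; the total
            # call count is checked against the recorded value, or
            # rewritten when the suite runs with --write-profiles.
            sum(i * i for i in range(1000))
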