Diffstat (limited to 'coverage')
-rw-r--r--  coverage/__init__.py                 |  48
-rw-r--r--  coverage/__main__.py                 |   2
-rw-r--r--  coverage/backward.py                 |  41
-rw-r--r--  coverage/bytecode.py                 |  53
-rw-r--r--  coverage/cmdline.py                  | 345
-rw-r--r--  coverage/codeunit.py                 |  24
-rw-r--r--  coverage/collector.py                |  55
-rw-r--r--  coverage/config.py                   | 213
-rw-r--r--  coverage/control.py                  | 304
-rw-r--r--  coverage/data.py                     |  43
-rw-r--r--  coverage/debug.py                    |  54
-rw-r--r--  coverage/execfile.py                 |  11
-rw-r--r--  coverage/files.py                    | 116
-rw-r--r--  coverage/fullcoverage/encodings.py   |  13
-rw-r--r--  coverage/html.py                     |  19
-rw-r--r--  coverage/htmlfiles/coverage_html.js  |   1
-rw-r--r--  coverage/htmlfiles/index.html        |   8
-rw-r--r--  coverage/htmlfiles/pyfile.html       |   2
-rw-r--r--  coverage/htmlfiles/style.css         |   4
-rw-r--r--  coverage/misc.py                     |  22
-rw-r--r--  coverage/parser.py                   | 233
-rw-r--r--  coverage/phystokens.py               |   2
-rw-r--r--  coverage/report.py                   |   5
-rw-r--r--  coverage/results.py                  |  25
-rw-r--r--  coverage/summary.py                  |   6
-rw-r--r--  coverage/tracer.c                    |  29
-rw-r--r--  coverage/version.py                  |   9
-rw-r--r--  coverage/xmlreport.py                |  20
28 files changed, 1097 insertions, 610 deletions
diff --git a/coverage/__init__.py b/coverage/__init__.py
index f4e17b29..193b7a10 100644
--- a/coverage/__init__.py
+++ b/coverage/__init__.py
@@ -5,19 +5,13 @@ http://nedbatchelder.com/code/coverage
"""
-__version__ = "3.5.3b1" # see detailed history in CHANGES.txt
-
-__url__ = "http://nedbatchelder.com/code/coverage"
-if max(__version__).isalpha():
- # For pre-releases, use a version-specific URL.
- __url__ += "/" + __version__
+from coverage.version import __version__, __url__
from coverage.control import coverage, process_startup
from coverage.data import CoverageData
from coverage.cmdline import main, CoverageScript
from coverage.misc import CoverageException
-
# Module-level functions. The original API to this module was based on
# functions defined directly in the module, with a singleton of the coverage()
# class. That design hampered programmability, so the current api uses
@@ -36,12 +30,34 @@ def _singleton_method(name):
called.
"""
+ # Disable pylint msg W0612, because a bunch of variables look unused, but
+ # they're accessed via locals().
+ # pylint: disable=W0612
+
def wrapper(*args, **kwargs):
"""Singleton wrapper around a coverage method."""
global _the_coverage
if not _the_coverage:
_the_coverage = coverage(auto_data=True)
return getattr(_the_coverage, name)(*args, **kwargs)
+
+ import inspect
+ meth = getattr(coverage, name)
+ args, varargs, kw, defaults = inspect.getargspec(meth)
+ argspec = inspect.formatargspec(args[1:], varargs, kw, defaults)
+ docstring = meth.__doc__
+ wrapper.__doc__ = ("""\
+ A first-use-singleton wrapper around coverage.%(name)s.
+
+ This wrapper is provided for backward compatibility with legacy code.
+ New code should use coverage.%(name)s directly.
+
+ %(name)s%(argspec)s:
+
+ %(docstring)s
+ """ % locals()
+ )
+
return wrapper
@@ -57,10 +73,26 @@ report = _singleton_method('report')
annotate = _singleton_method('annotate')
+# On Windows, we encode and decode deep enough that something goes wrong and
+# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
+# Adding a reference here prevents it from being unloaded. Yuk.
+import encodings.utf_8
+
+# Because of the "from coverage.control import fooey" lines at the top of the
+# file, there's an entry for coverage.coverage in sys.modules, mapped to None.
+# This makes some inspection tools (like pydoc) unable to find the class
+# coverage.coverage. So remove that entry.
+import sys
+try:
+ del sys.modules['coverage.coverage']
+except KeyError:
+ pass
+
+
# COPYRIGHT AND LICENSE
#
# Copyright 2001 Gareth Rees. All rights reserved.
-# Copyright 2004-2011 Ned Batchelder. All rights reserved.
+# Copyright 2004-2013 Ned Batchelder. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
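The version and URL logic removed from __init__.py above now lives in coverage/version.py (listed in the diffstat, not shown in this excerpt). A plausible sketch of that module, reconstructed from the removed lines; the version string here is a placeholder, not the real one:

    # coverage/version.py -- hedged reconstruction from the lines removed above
    __version__ = "3.6b1"   # hypothetical value; the diff does not show the new number

    __url__ = "http://nedbatchelder.com/code/coverage"
    if max(__version__).isalpha():
        # For pre-releases (any letter in the version string), use a version-specific URL.
        __url__ += "/" + __version__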
diff --git a/coverage/__main__.py b/coverage/__main__.py
index 111ca2e0..55e0d259 100644
--- a/coverage/__main__.py
+++ b/coverage/__main__.py
@@ -1,4 +1,4 @@
-"""Coverage.py's main entrypoint."""
+"""Coverage.py's main entry point."""
import sys
from coverage.cmdline import main
sys.exit(main())
diff --git a/coverage/backward.py b/coverage/backward.py
index 637a5976..54e46254 100644
--- a/coverage/backward.py
+++ b/coverage/backward.py
@@ -24,6 +24,31 @@ except NameError:
lst.sort()
return lst
+# Python 2.3 doesn't have `reversed`.
+try:
+ reversed = reversed
+except NameError:
+ def reversed(iterable):
+ """A 2.3-compatible implementation of `reversed`."""
+ lst = list(iterable)
+ return lst[::-1]
+
+# rpartition is new in 2.5
+try:
+ "".rpartition
+except AttributeError:
+ def rpartition(s, sep):
+ """Implement s.rpartition(sep) for old Pythons."""
+ i = s.rfind(sep)
+ if i == -1:
+ return ('', '', s)
+ else:
+ return (s[:i], sep, s[i+len(sep):])
+else:
+ def rpartition(s, sep):
+ """A common interface for new Pythons."""
+ return s.rpartition(sep)
+
# Pythons 2 and 3 differ on where to get StringIO
try:
from cStringIO import StringIO
@@ -49,6 +74,16 @@ try:
except NameError:
range = range
+# A function to iterate listlessly over a dict's items.
+if "iteritems" in dir({}):
+ def iitems(d):
+ """Produce the items from dict `d`."""
+ return d.iteritems()
+else:
+ def iitems(d):
+ """Produce the items from dict `d`."""
+ return d.items()
+
# Exec is a statement in Py2, a function in Py3
if sys.version_info >= (3, 0):
def exec_code_object(code, global_map):
@@ -66,12 +101,6 @@ else:
)
)
-# ConfigParser was renamed to the more-standard configparser
-try:
- import configparser
-except ImportError:
- import ConfigParser as configparser
-
# Reading Python source and interpreting the coding comment is a big deal.
if sys.version_info >= (3, 0):
# Python 3.2 provides `tokenize.open`, the best way to open source files.
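The new helpers above give one spelling for dict iteration (iitems) and string rpartition on every supported Python. A small usage sketch; the data is made up:

    # Illustrative only: exercising the new backward-compatibility helpers.
    from coverage.backward import iitems, rpartition

    counts = {"a.py": 3, "b.py": 7}
    for filename, n in iitems(counts):   # dict.iteritems() on Py2, dict.items() on Py3
        print("%s: %d lines" % (filename, n))

    print(rpartition("coverage.control.coverage", "."))
    # ('coverage.control', '.', 'coverage')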
diff --git a/coverage/bytecode.py b/coverage/bytecode.py
index 61c311eb..06bc1dfd 100644
--- a/coverage/bytecode.py
+++ b/coverage/bytecode.py
@@ -27,9 +27,9 @@ class ByteCodes(object):
Returns `ByteCode` objects.
"""
+ # pylint: disable=R0924
def __init__(self, code):
self.code = code
- self.offset = 0
if sys.version_info >= (3, 0):
def __getitem__(self, i):
@@ -39,32 +39,26 @@ class ByteCodes(object):
return ord(self.code[i])
def __iter__(self):
- return self
+ offset = 0
+ while offset < len(self.code):
+ bc = ByteCode()
+ bc.op = self[offset]
+ bc.offset = offset
- def __next__(self):
- if self.offset >= len(self.code):
- raise StopIteration
+ next_offset = offset+1
+ if bc.op >= opcode.HAVE_ARGUMENT:
+ bc.arg = self[offset+1] + 256*self[offset+2]
+ next_offset += 2
- bc = ByteCode()
- bc.op = self[self.offset]
- bc.offset = self.offset
+ label = -1
+ if bc.op in opcode.hasjrel:
+ label = next_offset + bc.arg
+ elif bc.op in opcode.hasjabs:
+ label = bc.arg
+ bc.jump_to = label
- next_offset = self.offset+1
- if bc.op >= opcode.HAVE_ARGUMENT:
- bc.arg = self[self.offset+1] + 256*self[self.offset+2]
- next_offset += 2
-
- label = -1
- if bc.op in opcode.hasjrel:
- label = next_offset + bc.arg
- elif bc.op in opcode.hasjabs:
- label = bc.arg
- bc.jump_to = label
-
- bc.next_offset = self.offset = next_offset
- return bc
-
- next = __next__ # Py2k uses an old-style non-dunder name.
+ bc.next_offset = offset = next_offset
+ yield bc
class CodeObjects(object):
@@ -73,18 +67,11 @@ class CodeObjects(object):
self.stack = [code]
def __iter__(self):
- return self
-
- def __next__(self):
- if self.stack:
+ while self.stack:
# We're going to return the code object on the stack, but first
# push its children for later returning.
code = self.stack.pop()
for c in code.co_consts:
if isinstance(c, types.CodeType):
self.stack.append(c)
- return code
-
- raise StopIteration
-
- next = __next__
+ yield code
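Both classes above drop their hand-written __next__/next methods in favor of generator-based __iter__ methods, which sidesteps the Py2/Py3 iterator-method naming difference. A toy illustration of the same pattern (not coverage.py code):

    class Countdown(object):
        """Toy class using a generator __iter__, the pattern ByteCodes and CodeObjects now use."""
        def __init__(self, start):
            self.start = start
        def __iter__(self):
            n = self.start
            while n > 0:
                yield n
                n -= 1

    print(list(Countdown(3)))   # [3, 2, 1]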
diff --git a/coverage/cmdline.py b/coverage/cmdline.py
index 1ce5e0f5..0881313e 100644
--- a/coverage/cmdline.py
+++ b/coverage/cmdline.py
@@ -1,10 +1,11 @@
"""Command-line support for Coverage."""
-import optparse, re, sys, traceback
+import optparse, os, sys, traceback
from coverage.backward import sorted # pylint: disable=W0622
from coverage.execfile import run_python_file, run_python_module
from coverage.misc import CoverageException, ExceptionDuringRun, NoSource
+from coverage.debug import info_formatter
class Opts(object):
@@ -19,11 +20,18 @@ class Opts(object):
'', '--branch', action='store_true',
help="Measure branch coverage in addition to statement coverage."
)
+ debug = optparse.make_option(
+ '', '--debug', action='store', metavar="OPTS",
+ help="Debug options, separated by commas"
+ )
directory = optparse.make_option(
- '-d', '--directory', action='store',
- metavar="DIR",
+ '-d', '--directory', action='store', metavar="DIR",
help="Write the output files to DIR."
)
+ fail_under = optparse.make_option(
+ '', '--fail-under', action='store', metavar="MIN", type="int",
+ help="Exit with a status of 2 if the total coverage is less than MIN."
+ )
help = optparse.make_option(
'-h', '--help', action='store_true',
help="Get help on this command."
@@ -89,6 +97,10 @@ class Opts(object):
help="Use a simpler but slower trace method. Try this if you get "
"seemingly impossible results!"
)
+ title = optparse.make_option(
+ '', '--title', action='store', metavar="TITLE",
+ help="A text string to use as the title on the HTML."
+ )
version = optparse.make_option(
'', '--version', action='store_true',
help="Display version information and exit."
@@ -110,7 +122,9 @@ class CoverageOptionParser(optparse.OptionParser, object):
self.set_defaults(
actions=[],
branch=None,
+ debug=None,
directory=None,
+ fail_under=None,
help=None,
ignore_errors=None,
include=None,
@@ -122,6 +136,7 @@ class CoverageOptionParser(optparse.OptionParser, object):
show_missing=None,
source=None,
timid=None,
+ title=None,
erase_first=None,
version=None,
)
@@ -273,9 +288,11 @@ CMDS = {
'html': CmdOptionParser("html",
[
Opts.directory,
+ Opts.fail_under,
Opts.ignore_errors,
Opts.omit,
Opts.include,
+ Opts.title,
] + GLOBAL_ARGS,
usage = "[options] [modules]",
description = "Create an HTML report of the coverage of the files. "
@@ -285,6 +302,7 @@ CMDS = {
'report': CmdOptionParser("report",
[
+ Opts.fail_under,
Opts.ignore_errors,
Opts.omit,
Opts.include,
@@ -298,6 +316,7 @@ CMDS = {
[
Opts.append,
Opts.branch,
+ Opts.debug,
Opts.pylib,
Opts.parallel_mode,
Opts.module,
@@ -314,20 +333,20 @@ CMDS = {
'xml': CmdOptionParser("xml",
[
+ Opts.fail_under,
Opts.ignore_errors,
Opts.omit,
Opts.include,
Opts.output_xml,
] + GLOBAL_ARGS,
cmd = "xml",
- defaults = {'outfile': 'coverage.xml'},
usage = "[options] [modules]",
description = "Generate an XML report of coverage results."
),
}
-OK, ERR = 0, 1
+OK, ERR, FAIL_UNDER = 0, 1, 2
class CoverageScript(object):
@@ -346,27 +365,10 @@ class CoverageScript(object):
self.run_python_file = _run_python_file or run_python_file
self.run_python_module = _run_python_module or run_python_module
self.help_fn = _help_fn or self.help
+ self.classic = False
self.coverage = None
- def help(self, error=None, topic=None, parser=None):
- """Display an error message, or the named topic."""
- assert error or topic or parser
- if error:
- print(error)
- print("Use 'coverage help' for help.")
- elif parser:
- print(parser.format_help().strip())
- else:
- # Parse out the topic we want from HELP_TOPICS
- topic_list = re.split("(?m)^=+ (\w+) =+$", HELP_TOPICS)
- topics = dict(zip(topic_list[1::2], topic_list[2::2]))
- help_msg = topics.get(topic, '').strip()
- if help_msg:
- print(help_msg % self.covpkg.__dict__)
- else:
- print("Don't know topic %r" % topic)
-
def command_line(self, argv):
"""The bulk of the command line interface to Coverage.
@@ -376,15 +378,14 @@ class CoverageScript(object):
"""
# Collect the command-line options.
-
if not argv:
self.help_fn(topic='minimum_help')
return OK
# The command syntax we parse depends on the first argument. Classic
# syntax always starts with an option.
- classic = argv[0].startswith('-')
- if classic:
+ self.classic = argv[0].startswith('-')
+ if self.classic:
parser = ClassicOptionParser()
else:
parser = CMDS.get(argv[0])
@@ -398,64 +399,19 @@ class CoverageScript(object):
if not ok:
return ERR
- # Handle help.
- if options.help:
- if classic:
- self.help_fn(topic='help')
- else:
- self.help_fn(parser=parser)
- return OK
-
- if "help" in options.actions:
- if args:
- for a in args:
- parser = CMDS.get(a)
- if parser:
- self.help_fn(parser=parser)
- else:
- self.help_fn(topic=a)
- else:
- self.help_fn(topic='help')
- return OK
-
- # Handle version.
- if options.version:
- self.help_fn(topic='version')
+ # Handle help and version.
+ if self.do_help(options, args, parser):
return OK
# Check for conflicts and problems in the options.
- for i in ['erase', 'execute']:
- for j in ['annotate', 'html', 'report', 'combine']:
- if (i in options.actions) and (j in options.actions):
- self.help_fn("You can't specify the '%s' and '%s' "
- "options at the same time." % (i, j))
- return ERR
-
- if not options.actions:
- self.help_fn(
- "You must specify at least one of -e, -x, -c, -r, -a, or -b."
- )
- return ERR
- args_allowed = (
- 'execute' in options.actions or
- 'annotate' in options.actions or
- 'html' in options.actions or
- 'debug' in options.actions or
- 'report' in options.actions or
- 'xml' in options.actions
- )
- if not args_allowed and args:
- self.help_fn("Unexpected arguments: %s" % " ".join(args))
- return ERR
-
- if 'execute' in options.actions and not args:
- self.help_fn("Nothing to do.")
+ if not self.args_ok(options, args):
return ERR
# Listify the list options.
source = unshell_list(options.source)
omit = unshell_list(options.omit)
include = unshell_list(options.include)
+ debug = unshell_list(options.debug)
# Do something.
self.coverage = self.covpkg.coverage(
@@ -467,41 +423,11 @@ class CoverageScript(object):
source = source,
omit = omit,
include = include,
+ debug = debug,
)
if 'debug' in options.actions:
- if not args:
- self.help_fn("What information would you like: data, sys?")
- return ERR
- for info in args:
- if info == 'sys':
- print("-- sys ----------------------------------------")
- for label, info in self.coverage.sysinfo():
- if info == []:
- info = "-none-"
- if isinstance(info, list):
- print("%15s:" % label)
- for e in info:
- print("%15s %s" % ("", e))
- else:
- print("%15s: %s" % (label, info))
- elif info == 'data':
- print("-- data ---------------------------------------")
- self.coverage.load()
- print("path: %s" % self.coverage.data.filename)
- print("has_arcs: %r" % self.coverage.data.has_arcs())
- summary = self.coverage.data.summary(fullpath=True)
- if summary:
- filenames = sorted(summary.keys())
- print("\n%d files:" % len(filenames))
- for f in filenames:
- print("%s: %d lines" % (f, summary[f]))
- else:
- print("No data collected")
- else:
- self.help_fn("Don't know what you mean by %r" % info)
- return ERR
- return OK
+ return self.do_debug(args)
if 'erase' in options.actions or options.erase_first:
self.coverage.erase()
@@ -509,22 +435,7 @@ class CoverageScript(object):
self.coverage.load()
if 'execute' in options.actions:
- # Run the script.
- self.coverage.start()
- code_ran = True
- try:
- try:
- if options.module:
- self.run_python_module(args[0], args)
- else:
- self.run_python_file(args[0], args)
- except NoSource:
- code_ran = False
- raise
- finally:
- if code_ran:
- self.coverage.stop()
- self.coverage.save()
+ self.do_execute(options, args)
if 'combine' in options.actions:
self.coverage.combine()
@@ -539,18 +450,167 @@ class CoverageScript(object):
)
if 'report' in options.actions:
- self.coverage.report(
+ total = self.coverage.report(
show_missing=options.show_missing, **report_args)
if 'annotate' in options.actions:
self.coverage.annotate(
directory=options.directory, **report_args)
if 'html' in options.actions:
- self.coverage.html_report(
- directory=options.directory, **report_args)
+ total = self.coverage.html_report(
+ directory=options.directory, title=options.title,
+ **report_args)
if 'xml' in options.actions:
outfile = options.outfile
- self.coverage.xml_report(outfile=outfile, **report_args)
+ total = self.coverage.xml_report(outfile=outfile, **report_args)
+ if options.fail_under is not None:
+ if total >= options.fail_under:
+ return OK
+ else:
+ return FAIL_UNDER
+ else:
+ return OK
+
+ def help(self, error=None, topic=None, parser=None):
+ """Display an error message, or the named topic."""
+ assert error or topic or parser
+ if error:
+ print(error)
+ print("Use 'coverage help' for help.")
+ elif parser:
+ print(parser.format_help().strip())
+ else:
+ help_msg = HELP_TOPICS.get(topic, '').strip()
+ if help_msg:
+ print(help_msg % self.covpkg.__dict__)
+ else:
+ print("Don't know topic %r" % topic)
+
+ def do_help(self, options, args, parser):
+ """Deal with help requests.
+
+ Return True if it handled the request, False if not.
+
+ """
+ # Handle help.
+ if options.help:
+ if self.classic:
+ self.help_fn(topic='help')
+ else:
+ self.help_fn(parser=parser)
+ return True
+
+ if "help" in options.actions:
+ if args:
+ for a in args:
+ parser = CMDS.get(a)
+ if parser:
+ self.help_fn(parser=parser)
+ else:
+ self.help_fn(topic=a)
+ else:
+ self.help_fn(topic='help')
+ return True
+
+ # Handle version.
+ if options.version:
+ self.help_fn(topic='version')
+ return True
+
+ return False
+
+ def args_ok(self, options, args):
+ """Check for conflicts and problems in the options.
+
+ Returns True if everything is ok, or False if not.
+
+ """
+ for i in ['erase', 'execute']:
+ for j in ['annotate', 'html', 'report', 'combine']:
+ if (i in options.actions) and (j in options.actions):
+ self.help_fn("You can't specify the '%s' and '%s' "
+ "options at the same time." % (i, j))
+ return False
+
+ if not options.actions:
+ self.help_fn(
+ "You must specify at least one of -e, -x, -c, -r, -a, or -b."
+ )
+ return False
+ args_allowed = (
+ 'execute' in options.actions or
+ 'annotate' in options.actions or
+ 'html' in options.actions or
+ 'debug' in options.actions or
+ 'report' in options.actions or
+ 'xml' in options.actions
+ )
+ if not args_allowed and args:
+ self.help_fn("Unexpected arguments: %s" % " ".join(args))
+ return False
+
+ if 'execute' in options.actions and not args:
+ self.help_fn("Nothing to do.")
+ return False
+
+ return True
+
+ def do_execute(self, options, args):
+ """Implementation of 'coverage run'."""
+
+ # Set the first path element properly.
+ old_path0 = sys.path[0]
+
+ # Run the script.
+ self.coverage.start()
+ code_ran = True
+ try:
+ try:
+ if options.module:
+ sys.path[0] = ''
+ self.run_python_module(args[0], args)
+ else:
+ filename = args[0]
+ sys.path[0] = os.path.abspath(os.path.dirname(filename))
+ self.run_python_file(filename, args)
+ except NoSource:
+ code_ran = False
+ raise
+ finally:
+ self.coverage.stop()
+ if code_ran:
+ self.coverage.save()
+
+ # Restore the old path
+ sys.path[0] = old_path0
+
+ def do_debug(self, args):
+ """Implementation of 'coverage debug'."""
+
+ if not args:
+ self.help_fn("What information would you like: data, sys?")
+ return ERR
+ for info in args:
+ if info == 'sys':
+ print("-- sys ----------------------------------------")
+ for line in info_formatter(self.coverage.sysinfo()):
+ print(" %s" % line)
+ elif info == 'data':
+ print("-- data ---------------------------------------")
+ self.coverage.load()
+ print("path: %s" % self.coverage.data.filename)
+ print("has_arcs: %r" % self.coverage.data.has_arcs())
+ summary = self.coverage.data.summary(fullpath=True)
+ if summary:
+ filenames = sorted(summary.keys())
+ print("\n%d files:" % len(filenames))
+ for f in filenames:
+ print("%s: %d lines" % (f, summary[f]))
+ else:
+ print("No data collected")
+ else:
+ self.help_fn("Don't know what you mean by %r" % info)
+ return ERR
return OK
@@ -568,10 +628,10 @@ def unshell_list(s):
return s.split(',')
-HELP_TOPICS = r"""
-
-== classic ====================================================================
-Coverage.py version %(__version__)s
+HELP_TOPICS = {
+# -------------------------
+'classic':
+r"""Coverage.py version %(__version__)s
Measure, collect, and report on code coverage in Python programs.
Usage:
@@ -615,8 +675,9 @@ coverage -a [-d DIR] [-i] [-o DIR,...] [FILE1 FILE2 ...]
Coverage data is saved in the file .coverage by default. Set the
COVERAGE_FILE environment variable to save it somewhere else.
-
-== help =======================================================================
+""",
+# -------------------------
+'help': """\
Coverage.py, version %(__version__)s
Measure, collect, and report on code coverage in Python programs.
@@ -635,20 +696,22 @@ Commands:
Use "coverage help <command>" for detailed help on any command.
Use "coverage help classic" for help on older command syntax.
For more information, see %(__url__)s
-
-== minimum_help ===============================================================
+""",
+# -------------------------
+'minimum_help': """\
Code coverage for Python. Use 'coverage help' for help.
-
-== version ====================================================================
+""",
+# -------------------------
+'version': """\
Coverage.py, version %(__version__)s. %(__url__)s
-
-"""
+""",
+}
def main(argv=None):
- """The main entrypoint to Coverage.
+ """The main entry point to Coverage.
- This is installed as the script entrypoint.
+ This is installed as the script entry point.
"""
if argv is None:
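With --fail-under, the command line can now exit with status 2 (FAIL_UNDER) when the total coverage is below the given minimum, alongside the existing 0 and 1 statuses. A hedged sketch of driving that from Python; the threshold is arbitrary and a .coverage data file is assumed to exist:

    import sys
    from coverage.cmdline import main

    # Equivalent to: coverage report --fail-under 80
    status = main(["report", "--fail-under", "80"])
    if status == 2:
        sys.stderr.write("Total coverage is below the required minimum.\n")
    sys.exit(status)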
diff --git a/coverage/codeunit.py b/coverage/codeunit.py
index 1999c50c..ca1ae5c5 100644
--- a/coverage/codeunit.py
+++ b/coverage/codeunit.py
@@ -52,8 +52,10 @@ class CodeUnit(object):
else:
f = morf
# .pyc files should always refer to a .py instead.
- if f.endswith('.pyc'):
+ if f.endswith('.pyc') or f.endswith('.pyo'):
f = f[:-1]
+ elif f.endswith('$py.class'): # Jython
+ f = f[:-9] + ".py"
self.filename = self.file_locator.canonical_filename(f)
if hasattr(morf, '__name__'):
@@ -77,12 +79,18 @@ class CodeUnit(object):
# Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all
# of them defined.
- def __lt__(self, other): return self.name < other.name
- def __le__(self, other): return self.name <= other.name
- def __eq__(self, other): return self.name == other.name
- def __ne__(self, other): return self.name != other.name
- def __gt__(self, other): return self.name > other.name
- def __ge__(self, other): return self.name >= other.name
+ def __lt__(self, other):
+ return self.name < other.name
+ def __le__(self, other):
+ return self.name <= other.name
+ def __eq__(self, other):
+ return self.name == other.name
+ def __ne__(self, other):
+ return self.name != other.name
+ def __gt__(self, other):
+ return self.name > other.name
+ def __ge__(self, other):
+ return self.name >= other.name
def flat_rootname(self):
"""A base for a flat filename to correspond to this code unit.
@@ -113,7 +121,7 @@ class CodeUnit(object):
# Couldn't find source.
raise CoverageException(
- "No source for code %r." % self.filename
+ "No source for code '%s'." % self.filename
)
def should_be_python(self):
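The filename handling above now maps compiled files back to their sources: .pyc and .pyo lose the trailing character, and Jython's $py.class becomes .py. The same mapping restated as a standalone function, for illustration only:

    def source_filename(f):
        """Illustration of the mapping the diff applies in CodeUnit.__init__."""
        if f.endswith('.pyc') or f.endswith('.pyo'):
            f = f[:-1]
        elif f.endswith('$py.class'):   # Jython
            f = f[:-9] + ".py"
        return f

    print(source_filename("pkg/mod.pyo"))        # pkg/mod.py
    print(source_filename("pkg/mod$py.class"))   # pkg/mod.py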
diff --git a/coverage/collector.py b/coverage/collector.py
index 3fdedaad..9a74700d 100644
--- a/coverage/collector.py
+++ b/coverage/collector.py
@@ -1,12 +1,23 @@
"""Raw data collector for Coverage."""
-import sys, threading
+import os, sys, threading
try:
# Use the C extension code when we can, for speed.
- from coverage.tracer import CTracer
+ from coverage.tracer import CTracer # pylint: disable=F0401,E0611
except ImportError:
# Couldn't import the C extension, maybe it isn't built.
+ if os.getenv('COVERAGE_TEST_TRACER') == 'c':
+ # During testing, we use the COVERAGE_TEST_TRACER env var to indicate
+ # that we've fiddled with the environment to test this fallback code.
+ # If we thought we had a C tracer, but couldn't import it, then exit
+ # quickly and clearly instead of dribbling confusing errors. I'm using
+ # sys.exit here instead of an exception because an exception here
+ # causes all sorts of other noise in unittest.
+ sys.stderr.write(
+ "*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n"
+ )
+ sys.exit(1)
CTracer = None
@@ -40,12 +51,15 @@ class PyTracer(object):
self.last_exc_back = None
self.last_exc_firstlineno = 0
self.arcs = False
+ self.thread = None
def _trace(self, frame, event, arg_unused):
"""The trace function passed to sys.settrace."""
- #print("trace event: %s %r @%d" % (
- # event, frame.f_code.co_filename, frame.f_lineno))
+ if 0:
+ sys.stderr.write("trace event: %s %r @%d\n" % (
+ event, frame.f_code.co_filename, frame.f_lineno
+ ))
if self.last_exc_back:
if frame == self.last_exc_back:
@@ -61,10 +75,11 @@ class PyTracer(object):
# in this file.
self.data_stack.append((self.cur_file_data, self.last_line))
filename = frame.f_code.co_filename
- tracename = self.should_trace_cache.get(filename)
- if tracename is None:
+ if filename not in self.should_trace_cache:
tracename = self.should_trace(filename, frame)
self.should_trace_cache[filename] = tracename
+ else:
+ tracename = self.should_trace_cache[filename]
#print("called, stack is %d deep, tracename is %r" % (
# len(self.data_stack), tracename))
if tracename:
@@ -105,15 +120,21 @@ class PyTracer(object):
Return a Python function suitable for use with sys.settrace().
"""
+ self.thread = threading.currentThread()
sys.settrace(self._trace)
return self._trace
def stop(self):
"""Stop this Tracer."""
+ if self.thread != threading.currentThread():
+ # Called on a different thread than started us: do nothing.
+ return
+
if hasattr(sys, "gettrace") and self.warn:
if sys.gettrace() != self._trace:
msg = "Trace function changed, measurement is likely wrong: %r"
- self.warn(msg % sys.gettrace())
+ self.warn(msg % (sys.gettrace(),))
+ #print("Stopping tracer on %s" % threading.current_thread().ident)
sys.settrace(None)
def get_stats(self):
@@ -146,7 +167,7 @@ class Collector(object):
"""Create a collector.
`should_trace` is a function, taking a filename, and returning a
- canonicalized filename, or False depending on whether the file should
+ canonicalized filename, or None depending on whether the file should
be traced or not.
If `timid` is true, then a slower simpler trace function will be
@@ -190,7 +211,7 @@ class Collector(object):
# A cache of the results from should_trace, the decision about whether
# to trace execution in a file. A dict of filename to (filename or
- # False).
+ # None).
self.should_trace_cache = {}
# Our active Tracers.
@@ -232,24 +253,28 @@ class Collector(object):
if self._collectors:
self._collectors[-1].pause()
self._collectors.append(self)
- #print >>sys.stderr, "Started: %r" % self._collectors
+ #print("Started: %r" % self._collectors, file=sys.stderr)
# Check to see whether we had a fullcoverage tracer installed.
- traces0 = None
+ traces0 = []
if hasattr(sys, "gettrace"):
fn0 = sys.gettrace()
if fn0:
tracer0 = getattr(fn0, '__self__', None)
if tracer0:
- traces0 = getattr(tracer0, 'traces', None)
+ traces0 = getattr(tracer0, 'traces', [])
# Install the tracer on this thread.
fn = self._start_tracer()
- if traces0:
- for args in traces0:
- (frame, event, arg), lineno = args
+ for args in traces0:
+ (frame, event, arg), lineno = args
+ try:
fn(frame, event, arg, lineno=lineno)
+ except TypeError:
+ raise Exception(
+ "fullcoverage must be run with the C trace function."
+ )
# Install our installation tracer in threading, to jump start other
# threads.
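The collector prefers the C tracer and silently falls back to the pure-Python PyTracer when the extension can't be imported; setting COVERAGE_TEST_TRACER to 'c' turns that silent fallback into an immediate exit during testing. Requesting the Python tracer deliberately is done with timid=True, as in this minimal sketch:

    from coverage import coverage

    cov = coverage(timid=True)   # "simpler but slower trace method", i.e. the pure-Python tracer
    cov.start()
    # ... code under measurement runs here ...
    cov.stop()
    cov.save()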
diff --git a/coverage/config.py b/coverage/config.py
index 49d74e7a..87318ff1 100644
--- a/coverage/config.py
+++ b/coverage/config.py
@@ -1,7 +1,76 @@
"""Config file for coverage.py"""
-import os
-from coverage.backward import configparser # pylint: disable=W0622
+import os, re, sys
+from coverage.backward import string_class, iitems
+
+# In py3, ConfigParser was renamed to the more-standard configparser
+try:
+ import configparser # pylint: disable=F0401
+except ImportError:
+ import ConfigParser as configparser
+
+
+class HandyConfigParser(configparser.RawConfigParser):
+ """Our specialization of ConfigParser."""
+
+ def read(self, filename):
+ """Read a filename as UTF-8 configuration data."""
+ kwargs = {}
+ if sys.version_info >= (3, 2):
+ kwargs['encoding'] = "utf-8"
+ return configparser.RawConfigParser.read(self, filename, **kwargs)
+
+ def get(self, *args, **kwargs):
+ v = configparser.RawConfigParser.get(self, *args, **kwargs)
+ def dollar_replace(m):
+ """Called for each $replacement."""
+ # Only one of the groups will have matched, just get its text.
+ word = [w for w in m.groups() if w is not None][0]
+ if word == "$":
+ return "$"
+ else:
+ return os.environ.get(word, '')
+
+ dollar_pattern = r"""(?x) # Use extended regex syntax
+ \$(?: # A dollar sign, then
+ (?P<v1>\w+) | # a plain word,
+ {(?P<v2>\w+)} | # or a {-wrapped word,
+ (?P<char>[$]) # or a dollar sign.
+ )
+ """
+ v = re.sub(dollar_pattern, dollar_replace, v)
+ return v
+
+ def getlist(self, section, option):
+ """Read a list of strings.
+
+ The value of `section` and `option` is treated as a comma- and newline-
+ separated list of strings. Each value is stripped of whitespace.
+
+ Returns the list of strings.
+
+ """
+ value_list = self.get(section, option)
+ values = []
+ for value_line in value_list.split('\n'):
+ for value in value_line.split(','):
+ value = value.strip()
+ if value:
+ values.append(value)
+ return values
+
+ def getlinelist(self, section, option):
+ """Read a list of full-line strings.
+
+ The value of `section` and `option` is treated as a newline-separated
+ list of strings. Each value is stripped of whitespace.
+
+ Returns the list of strings.
+
+ """
+ value_list = self.get(section, option)
+ return list(filter(None, value_list.split('\n')))
+
# The default line exclusion regexes
DEFAULT_EXCLUDE = [
@@ -29,9 +98,12 @@ class CoverageConfig(object):
operation of coverage.py.
"""
-
def __init__(self):
"""Initialize the configuration attributes to their defaults."""
+ # Metadata about the config.
+ self.attempted_config_files = []
+ self.config_files = []
+
# Defaults for [run]
self.branch = False
self.cover_pylib = False
@@ -39,6 +111,7 @@ class CoverageConfig(object):
self.parallel = False
self.timid = False
self.source = None
+ self.debug = []
# Defaults for [report]
self.exclude_list = DEFAULT_EXCLUDE[:]
@@ -53,6 +126,7 @@ class CoverageConfig(object):
# Defaults for [html]
self.html_dir = "htmlcov"
self.extra_css = None
+ self.html_title = "Coverage report"
# Defaults for [xml]
self.xml_output = "coverage.xml"
@@ -69,102 +143,71 @@ class CoverageConfig(object):
if env:
self.timid = ('--timid' in env)
+ MUST_BE_LIST = ["omit", "include", "debug"]
+
def from_args(self, **kwargs):
"""Read config values from `kwargs`."""
- for k, v in kwargs.items():
+ for k, v in iitems(kwargs):
if v is not None:
+ if k in self.MUST_BE_LIST and isinstance(v, string_class):
+ v = [v]
setattr(self, k, v)
- def from_file(self, *files):
- """Read configuration from .rc files.
+ def from_file(self, filename):
+ """Read configuration from a .rc file.
- Each argument in `files` is a file name to read.
+ `filename` is a file name to read.
"""
- cp = configparser.RawConfigParser()
- cp.read(files)
-
- # [run]
- if cp.has_option('run', 'branch'):
- self.branch = cp.getboolean('run', 'branch')
- if cp.has_option('run', 'cover_pylib'):
- self.cover_pylib = cp.getboolean('run', 'cover_pylib')
- if cp.has_option('run', 'data_file'):
- self.data_file = cp.get('run', 'data_file')
- if cp.has_option('run', 'include'):
- self.include = self.get_list(cp, 'run', 'include')
- if cp.has_option('run', 'omit'):
- self.omit = self.get_list(cp, 'run', 'omit')
- if cp.has_option('run', 'parallel'):
- self.parallel = cp.getboolean('run', 'parallel')
- if cp.has_option('run', 'source'):
- self.source = self.get_list(cp, 'run', 'source')
- if cp.has_option('run', 'timid'):
- self.timid = cp.getboolean('run', 'timid')
+ self.attempted_config_files.append(filename)
- # [report]
- if cp.has_option('report', 'exclude_lines'):
- self.exclude_list = \
- self.get_line_list(cp, 'report', 'exclude_lines')
- if cp.has_option('report', 'ignore_errors'):
- self.ignore_errors = cp.getboolean('report', 'ignore_errors')
- if cp.has_option('report', 'include'):
- self.include = self.get_list(cp, 'report', 'include')
- if cp.has_option('report', 'omit'):
- self.omit = self.get_list(cp, 'report', 'omit')
- if cp.has_option('report', 'partial_branches'):
- self.partial_list = \
- self.get_line_list(cp, 'report', 'partial_branches')
- if cp.has_option('report', 'partial_branches_always'):
- self.partial_always_list = \
- self.get_line_list(cp, 'report', 'partial_branches_always')
- if cp.has_option('report', 'precision'):
- self.precision = cp.getint('report', 'precision')
- if cp.has_option('report', 'show_missing'):
- self.show_missing = cp.getboolean('report', 'show_missing')
-
- # [html]
- if cp.has_option('html', 'directory'):
- self.html_dir = cp.get('html', 'directory')
- if cp.has_option('html', 'extra_css'):
- self.extra_css = cp.get('html', 'extra_css')
+ cp = HandyConfigParser()
+ files_read = cp.read(filename)
+ if files_read is not None: # return value changed in 2.4
+ self.config_files.extend(files_read)
- # [xml]
- if cp.has_option('xml', 'output'):
- self.xml_output = cp.get('xml', 'output')
+ for option_spec in self.CONFIG_FILE_OPTIONS:
+ self.set_attr_from_config_option(cp, *option_spec)
- # [paths]
+ # [paths] is special
if cp.has_section('paths'):
for option in cp.options('paths'):
- self.paths[option] = self.get_list(cp, 'paths', option)
-
- def get_list(self, cp, section, option):
- """Read a list of strings from the ConfigParser `cp`.
-
- The value of `section` and `option` is treated as a comma- and newline-
- separated list of strings. Each value is stripped of whitespace.
+ self.paths[option] = cp.getlist('paths', option)
- Returns the list of strings.
-
- """
- value_list = cp.get(section, option)
- values = []
- for value_line in value_list.split('\n'):
- for value in value_line.split(','):
- value = value.strip()
- if value:
- values.append(value)
- return values
-
- def get_line_list(self, cp, section, option):
- """Read a list of full-line strings from the ConfigParser `cp`.
-
- The value of `section` and `option` is treated as a newline-separated
- list of strings. Each value is stripped of whitespace.
+ CONFIG_FILE_OPTIONS = [
+ # [run]
+ ('branch', 'run:branch', 'boolean'),
+ ('cover_pylib', 'run:cover_pylib', 'boolean'),
+ ('data_file', 'run:data_file'),
+ ('debug', 'run:debug', 'list'),
+ ('include', 'run:include', 'list'),
+ ('omit', 'run:omit', 'list'),
+ ('parallel', 'run:parallel', 'boolean'),
+ ('source', 'run:source', 'list'),
+ ('timid', 'run:timid', 'boolean'),
- Returns the list of strings.
+ # [report]
+ ('exclude_list', 'report:exclude_lines', 'linelist'),
+ ('ignore_errors', 'report:ignore_errors', 'boolean'),
+ ('include', 'report:include', 'list'),
+ ('omit', 'report:omit', 'list'),
+ ('partial_list', 'report:partial_branches', 'linelist'),
+ ('partial_always_list', 'report:partial_branches_always', 'linelist'),
+ ('precision', 'report:precision', 'int'),
+ ('show_missing', 'report:show_missing', 'boolean'),
- """
- value_list = cp.get(section, option)
- return list(filter(None, value_list.split('\n')))
+ # [html]
+ ('html_dir', 'html:directory'),
+ ('extra_css', 'html:extra_css'),
+ ('html_title', 'html:title'),
+ # [xml]
+ ('xml_output', 'xml:output'),
+ ]
+
+ def set_attr_from_config_option(self, cp, attr, where, type_=''):
+ """Set an attribute on self if it exists in the ConfigParser."""
+ section, option = where.split(":")
+ if cp.has_option(section, option):
+ method = getattr(cp, 'get'+type_)
+ setattr(self, attr, method(section, option))
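HandyConfigParser.get() now expands environment variables in option values: $WORD and ${WORD} become os.environ values, and $$ is a literal dollar sign. A self-contained sketch of that behaviour; the file name, option values, and environment variable are invented:

    import os
    from coverage.config import HandyConfigParser

    os.environ["MY_OMIT"] = "*/vendor/*"
    with open("demo.cfg", "w") as f:
        f.write("[run]\nomit =\n    $MY_OMIT\n    */tests/*\n")

    cp = HandyConfigParser()
    cp.read("demo.cfg")
    print(cp.getlist("run", "omit"))   # ['*/vendor/*', '*/tests/*']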
diff --git a/coverage/control.py b/coverage/control.py
index c21d885e..4b76121c 100644
--- a/coverage/control.py
+++ b/coverage/control.py
@@ -3,21 +3,31 @@
import atexit, os, random, socket, sys
from coverage.annotate import AnnotateReporter
-from coverage.backward import string_class
+from coverage.backward import string_class, iitems, sorted # pylint: disable=W0622
from coverage.codeunit import code_unit_factory, CodeUnit
from coverage.collector import Collector
from coverage.config import CoverageConfig
from coverage.data import CoverageData
+from coverage.debug import DebugControl
from coverage.files import FileLocator, TreeMatcher, FnmatchMatcher
-from coverage.files import PathAliases, find_python_files
+from coverage.files import PathAliases, find_python_files, prep_patterns
from coverage.html import HtmlReporter
from coverage.misc import CoverageException, bool_or_none, join_regex
+from coverage.misc import file_be_gone
from coverage.results import Analysis, Numbers
from coverage.summary import SummaryReporter
from coverage.xmlreport import XmlReporter
+# Pypy has some unusual stuff in the "stdlib". Consider those locations
+# when deciding where the stdlib is.
+try:
+ import _structseq # pylint: disable=F0401
+except ImportError:
+ _structseq = None
+
+
class coverage(object):
- """Programmatic access to Coverage.
+ """Programmatic access to coverage.py.
To use::
@@ -25,14 +35,15 @@ class coverage(object):
cov = coverage()
cov.start()
- #.. blah blah (run your code) blah blah ..
+ #.. call your code ..
cov.stop()
cov.html_report(directory='covhtml')
"""
def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
auto_data=False, timid=None, branch=None, config_file=True,
- source=None, omit=None, include=None):
+ source=None, omit=None, include=None, debug=None,
+ debug_file=None):
"""
`data_file` is the base name of the data file to use, defaulting to
".coverage". `data_suffix` is appended (with a dot) to `data_file` to
@@ -67,6 +78,10 @@ class coverage(object):
`include` will be measured, files that match `omit` will not. Each
will also accept a single string argument.
+ `debug` is a list of strings indicating what debugging information is
+ desired. `debug_file` is the file to write debug messages to,
+ defaulting to stderr.
+
"""
from coverage import __version__
@@ -96,18 +111,16 @@ class coverage(object):
self.config.data_file = env_data_file
# 4: from constructor arguments:
- if isinstance(omit, string_class):
- omit = [omit]
- if isinstance(include, string_class):
- include = [include]
self.config.from_args(
data_file=data_file, cover_pylib=cover_pylib, timid=timid,
branch=branch, parallel=bool_or_none(data_suffix),
- source=source, omit=omit, include=include
+ source=source, omit=omit, include=include, debug=debug,
)
+ # Create and configure the debugging controller.
+ self.debug = DebugControl(self.config.debug, debug_file or sys.stderr)
+
self.auto_data = auto_data
- self.atexit_registered = False
# _exclude_re is a dict mapping exclusion list names to compiled
# regexes.
@@ -125,8 +138,8 @@ class coverage(object):
else:
self.source_pkgs.append(src)
- self.omit = self._prep_patterns(self.config.omit)
- self.include = self._prep_patterns(self.config.include)
+ self.omit = prep_patterns(self.config.omit)
+ self.include = prep_patterns(self.config.include)
self.collector = Collector(
self._should_trace, timid=self.config.timid,
@@ -151,7 +164,8 @@ class coverage(object):
# started rather than wherever the process eventually chdir'd to.
self.data = CoverageData(
basename=self.config.data_file,
- collector="coverage v%s" % __version__
+ collector="coverage v%s" % __version__,
+ debug=self.debug,
)
# The dirs for files considered "installed with the interpreter".
@@ -162,9 +176,9 @@ class coverage(object):
# environments (virtualenv, for example), these modules may be
# spread across a few locations. Look at all the candidate modules
# we've imported, and take all the different ones.
- for m in (atexit, os, random, socket):
- if hasattr(m, "__file__"):
- m_dir = self._canonical_dir(m.__file__)
+ for m in (atexit, os, random, socket, _structseq):
+ if m is not None and hasattr(m, "__file__"):
+ m_dir = self._canonical_dir(m)
if m_dir not in self.pylib_dirs:
self.pylib_dirs.append(m_dir)
@@ -172,55 +186,61 @@ class coverage(object):
# where we are.
self.cover_dir = self._canonical_dir(__file__)
- # The matchers for _should_trace, created when tracing starts.
+ # The matchers for _should_trace.
self.source_match = None
self.pylib_match = self.cover_match = None
self.include_match = self.omit_match = None
- # Only _harvest_data once per measurement cycle.
- self._harvested = False
-
# Set the reporting precision.
Numbers.set_precision(self.config.precision)
- # When tearing down the coverage object, modules can become None.
- # Saving the modules as object attributes avoids problems, but it is
- # quite ad-hoc which modules need to be saved and which references
- # need to use the object attributes.
- self.socket = socket
- self.os = os
- self.random = random
+ # Is it ok for no data to be collected?
+ self._warn_no_data = True
+ self._warn_unimported_source = True
+
+ # State machine variables:
+ # Have we started collecting and not stopped it?
+ self._started = False
+ # Have we measured some data and not harvested it?
+ self._measured = False
+
+ atexit.register(self._atexit)
- def _canonical_dir(self, f):
- """Return the canonical directory of the file `f`."""
- return os.path.split(self.file_locator.canonical_filename(f))[0]
+ def _canonical_dir(self, morf):
+ """Return the canonical directory of the module or file `morf`."""
+ return os.path.split(CodeUnit(morf, self.file_locator).filename)[0]
def _source_for_file(self, filename):
"""Return the source file for `filename`."""
if not filename.endswith(".py"):
if filename[-4:-1] == ".py":
filename = filename[:-1]
+ elif filename.endswith("$py.class"): # jython
+ filename = filename[:-9] + ".py"
return filename
- def _should_trace(self, filename, frame):
- """Decide whether to trace execution in `filename`
+ def _should_trace_with_reason(self, filename, frame):
+ """Decide whether to trace execution in `filename`, with a reason.
This function is called from the trace function. As each new file name
is encountered, this function determines whether it is traced or not.
- Returns a canonicalized filename if it should be traced, False if it
- should not.
+ Returns a pair of values: the first indicates whether the file should
+ be traced: it's a canonicalized filename if it should be traced, None
+ if it should not. The second value is a string, the reason for the
+ decision.
"""
- if os is None:
- return False
+ if not filename:
+ # Empty string is pretty useless
+ return None, "empty string isn't a filename"
if filename.startswith('<'):
# Lots of non-file execution is represented with artificial
# filenames like "<string>", "<doctest readme.txt[0]>", or
# "<exec_function>". Don't ever trace these executions, since we
# can't do anything with the data later anyway.
- return False
+ return None, "not a real filename"
self._check_for_packages()
@@ -246,60 +266,47 @@ class coverage(object):
# stdlib and coverage.py directories.
if self.source_match:
if not self.source_match.match(canonical):
- return False
+ return None, "falls outside the --source trees"
elif self.include_match:
if not self.include_match.match(canonical):
- return False
+ return None, "falls outside the --include trees"
else:
# If we aren't supposed to trace installed code, then check if this
# is near the Python standard library and skip it if so.
if self.pylib_match and self.pylib_match.match(canonical):
- return False
+ return None, "is in the stdlib"
# We exclude the coverage code itself, since a little of it will be
# measured otherwise.
if self.cover_match and self.cover_match.match(canonical):
- return False
+ return None, "is part of coverage.py"
# Check the file against the omit pattern.
if self.omit_match and self.omit_match.match(canonical):
- return False
+ return None, "is inside an --omit pattern"
- return canonical
+ return canonical, "because we love you"
+
+ def _should_trace(self, filename, frame):
+ """Decide whether to trace execution in `filename`.
- # To log what should_trace returns, change this to "if 1:"
- if 0:
- _real_should_trace = _should_trace
- def _should_trace(self, filename, frame): # pylint: disable=E0102
- """A logging decorator around the real _should_trace function."""
- ret = self._real_should_trace(filename, frame)
- print("should_trace: %r -> %r" % (filename, ret))
- return ret
+ Calls `_should_trace_with_reason`, and returns just the decision.
+
+ """
+ canonical, reason = self._should_trace_with_reason(filename, frame)
+ if self.debug.should('trace'):
+ if not canonical:
+ msg = "Not tracing %r: %s" % (filename, reason)
+ else:
+ msg = "Tracing %r" % (filename,)
+ self.debug.write(msg)
+ return canonical
def _warn(self, msg):
"""Use `msg` as a warning."""
self._warnings.append(msg)
sys.stderr.write("Coverage.py warning: %s\n" % msg)
- def _prep_patterns(self, patterns):
- """Prepare the file patterns for use in a `FnmatchMatcher`.
-
- If a pattern starts with a wildcard, it is used as a pattern
- as-is. If it does not start with a wildcard, then it is made
- absolute with the current directory.
-
- If `patterns` is None, an empty list is returned.
-
- """
- patterns = patterns or []
- prepped = []
- for p in patterns or []:
- if p.startswith("*") or p.startswith("?"):
- prepped.append(p)
- else:
- prepped.append(self.file_locator.abs_file(p))
- return prepped
-
def _check_for_packages(self):
"""Update the source_match matcher with latest imported packages."""
# Our self.source_pkgs attribute is a list of package names we want to
@@ -322,7 +329,7 @@ class coverage(object):
pkg_file = None
else:
d, f = os.path.split(pkg_file)
- if f.startswith('__init__.'):
+ if f.startswith('__init__'):
# This is actually a package, return the directory.
pkg_file = d
else:
@@ -354,17 +361,21 @@ class coverage(object):
self.data.read()
def start(self):
- """Start measuring code coverage."""
+ """Start measuring code coverage.
+
+ Coverage measurement actually occurs in functions called after `start`
+ is invoked. Statements in the same scope as `start` won't be measured.
+
+ Once you invoke `start`, you must also call `stop` eventually, or your
+ process might not shut down cleanly.
+
+ """
if self.run_suffix:
# Calling start() means we're running code, so use the run_suffix
# as the data_suffix when we eventually save the data.
self.data_suffix = self.run_suffix
if self.auto_data:
self.load()
- # Save coverage data when Python exits.
- if not self.atexit_registered:
- atexit.register(self.save)
- self.atexit_registered = True
# Create the matchers we need for _should_trace
if self.source or self.source_pkgs:
@@ -379,13 +390,31 @@ class coverage(object):
if self.omit:
self.omit_match = FnmatchMatcher(self.omit)
- self._harvested = False
+ # The user may want to debug things, show info if desired.
+ if self.debug.should('config'):
+ self.debug.write("Configuration values:")
+ config_info = sorted(self.config.__dict__.items())
+ self.debug.write_formatted_info(config_info)
+
+ if self.debug.should('sys'):
+ self.debug.write("Debugging info:")
+ self.debug.write_formatted_info(self.sysinfo())
+
self.collector.start()
+ self._started = True
+ self._measured = True
def stop(self):
"""Stop measuring code coverage."""
+ self._started = False
self.collector.stop()
- self._harvest_data()
+
+ def _atexit(self):
+ """Clean up on process shutdown."""
+ if self._started:
+ self.stop()
+ if self.auto_data:
+ self.save()
def erase(self):
"""Erase previously-collected coverage data.
@@ -449,9 +478,15 @@ class coverage(object):
# plenty of distinguishing information. We do this here in
# `save()` at the last minute so that the pid will be correct even
# if the process forks.
- data_suffix = "%s.%s.%06d" % (
- self.socket.gethostname(), self.os.getpid(),
- self.random.randint(0, 99999)
+ extra = ""
+ if _TEST_NAME_FILE:
+ f = open(_TEST_NAME_FILE)
+ test_name = f.read()
+ f.close()
+ extra = "." + test_name
+ data_suffix = "%s%s.%s.%06d" % (
+ socket.gethostname(), extra, os.getpid(),
+ random.randint(0, 999999)
)
self._harvest_data()
@@ -480,27 +515,37 @@ class coverage(object):
Also warn about various problems collecting data.
"""
- if not self._harvested:
- self.data.add_line_data(self.collector.get_line_data())
- self.data.add_arc_data(self.collector.get_arc_data())
- self.collector.reset()
+ if not self._measured:
+ return
+
+ self.data.add_line_data(self.collector.get_line_data())
+ self.data.add_arc_data(self.collector.get_arc_data())
+ self.collector.reset()
- # If there are still entries in the source_pkgs list, then we never
- # encountered those packages.
+ # If there are still entries in the source_pkgs list, then we never
+ # encountered those packages.
+ if self._warn_unimported_source:
for pkg in self.source_pkgs:
self._warn("Module %s was never imported." % pkg)
- # Find out if we got any data.
- summary = self.data.summary()
- if not summary:
- self._warn("No data was collected.")
+ # Find out if we got any data.
+ summary = self.data.summary()
+ if not summary and self._warn_no_data:
+ self._warn("No data was collected.")
+
+ # Find files that were never executed at all.
+ for src in self.source:
+ for py_file in find_python_files(src):
+ py_file = self.file_locator.canonical_filename(py_file)
+
+ if self.omit_match and self.omit_match.match(py_file):
+ # Turns out this file was omitted, so don't pull it back
+ # in as unexecuted.
+ continue
- # Find files that were never executed at all.
- for src in self.source:
- for py_file in find_python_files(src):
- self.data.touch_file(py_file)
+ self.data.touch_file(py_file)
- self._harvested = True
+ self._measured = False
# Backward compatibility with version 1.
def analysis(self, morf):
@@ -537,6 +582,7 @@ class coverage(object):
Returns an `Analysis` object.
"""
+ self._harvest_data()
if not isinstance(it, CodeUnit):
it = code_unit_factory(it, self.file_locator)[0]
@@ -555,13 +601,16 @@ class coverage(object):
match those patterns will be included in the report. Modules matching
`omit` will not be included in the report.
+ Returns a float, the total percentage covered.
+
"""
+ self._harvest_data()
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include,
show_missing=show_missing,
)
reporter = SummaryReporter(self, self.config)
- reporter.report(morfs, outfile=file)
+ return reporter.report(morfs, outfile=file)
def annotate(self, morfs=None, directory=None, ignore_errors=None,
omit=None, include=None):
@@ -575,6 +624,7 @@ class coverage(object):
See `coverage.report()` for other arguments.
"""
+ self._harvest_data()
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include
)
@@ -582,7 +632,7 @@ class coverage(object):
reporter.report(morfs, directory=directory)
def html_report(self, morfs=None, directory=None, ignore_errors=None,
- omit=None, include=None, extra_css=None):
+ omit=None, include=None, extra_css=None, title=None):
"""Generate an HTML report.
The HTML is written to `directory`. The file "index.html" is the
@@ -592,15 +642,21 @@ class coverage(object):
`extra_css` is a path to a file of other CSS to apply on the page.
It will be copied into the HTML directory.
+ `title` is a text string (not HTML) to use as the title of the HTML
+ report.
+
See `coverage.report()` for other arguments.
+ Returns a float, the total percentage covered.
+
"""
+ self._harvest_data()
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include,
- html_dir=directory, extra_css=extra_css,
+ html_dir=directory, extra_css=extra_css, html_title=title,
)
reporter = HtmlReporter(self, self.config)
- reporter.report(morfs)
+ return reporter.report(morfs)
def xml_report(self, morfs=None, outfile=None, ignore_errors=None,
omit=None, include=None):
@@ -613,12 +669,16 @@ class coverage(object):
See `coverage.report()` for other arguments.
+ Returns a float, the total percentage covered.
+
"""
+ self._harvest_data()
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include,
xml_output=outfile,
)
file_to_close = None
+ delete_file = False
if self.config.xml_output:
if self.config.xml_output == '-':
outfile = sys.stdout
@@ -626,11 +686,17 @@ class coverage(object):
outfile = open(self.config.xml_output, "w")
file_to_close = outfile
try:
- reporter = XmlReporter(self, self.config)
- reporter.report(morfs, outfile=outfile)
+ try:
+ reporter = XmlReporter(self, self.config)
+ return reporter.report(morfs, outfile=outfile)
+ except CoverageException:
+ delete_file = True
+ raise
finally:
if file_to_close:
file_to_close.close()
+ if delete_file:
+ file_be_gone(self.config.xml_output)
def sysinfo(self):
"""Return a list of (key, value) pairs showing internal information."""
@@ -649,17 +715,32 @@ class coverage(object):
('cover_dir', self.cover_dir),
('pylib_dirs', self.pylib_dirs),
('tracer', self.collector.tracer_name()),
+ ('config_files', self.config.attempted_config_files),
+ ('configs_read', self.config.config_files),
('data_path', self.data.filename),
('python', sys.version.replace('\n', '')),
('platform', platform.platform()),
('implementation', implementation),
+ ('executable', sys.executable),
('cwd', os.getcwd()),
('path', sys.path),
- ('environment', [
- ("%s = %s" % (k, v)) for k, v in os.environ.items()
- if re.search("^COV|^PY", k)
- ]),
+ ('environment', sorted([
+ ("%s = %s" % (k, v)) for k, v in iitems(os.environ)
+ if re.search(r"^COV|^PY", k)
+ ])),
+ ('command_line', " ".join(getattr(sys, 'argv', ['???']))),
]
+ if self.source_match:
+ info.append(('source_match', self.source_match.info()))
+ if self.include_match:
+ info.append(('include_match', self.include_match.info()))
+ if self.omit_match:
+ info.append(('omit_match', self.omit_match.info()))
+ if self.cover_match:
+ info.append(('cover_match', self.cover_match.info()))
+ if self.pylib_match:
+ info.append(('pylib_match', self.pylib_match.info()))
+
return info
@@ -686,7 +767,10 @@ def process_startup():
cps = os.environ.get("COVERAGE_PROCESS_START")
if cps:
cov = coverage(config_file=cps, auto_data=True)
- if os.environ.get("COVERAGE_COVERAGE"):
- # Measuring coverage within coverage.py takes yet more trickery.
- cov.cover_dir = "Please measure coverage.py!"
cov.start()
+ cov._warn_no_data = False
+ cov._warn_unimported_source = False
+
+
+# A hack for debugging testing in subprocesses.
+_TEST_NAME_FILE = "" #"/tmp/covtest.txt"
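Two API changes above are worth a usage sketch: coverage() now accepts debug and debug_file, and report(), html_report(), and xml_report() return the total percentage covered. The debug categories ('trace', 'config', 'sys', 'dataio') come from this diff; everything else below is an invented example:

    # demo.py -- run as an ordinary script
    import sys
    from coverage import coverage

    def work():
        return sum(range(5))

    cov = coverage(debug=["trace"], debug_file=sys.stderr)
    cov.start()
    work()
    cov.stop()
    cov.save()
    print("total: %.1f%%" % cov.report())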
diff --git a/coverage/data.py b/coverage/data.py
index 7a8d656f..fb88c5b1 100644
--- a/coverage/data.py
+++ b/coverage/data.py
@@ -2,8 +2,9 @@
import os
-from coverage.backward import pickle, sorted # pylint: disable=W0622
+from coverage.backward import iitems, pickle, sorted # pylint: disable=W0622
from coverage.files import PathAliases
+from coverage.misc import file_be_gone
class CoverageData(object):
@@ -22,15 +23,18 @@ class CoverageData(object):
"""
- def __init__(self, basename=None, collector=None):
+ def __init__(self, basename=None, collector=None, debug=None):
"""Create a CoverageData.
`basename` is the name of the file to use for storing data.
`collector` is a string describing the coverage measurement software.
+ `debug` is a `DebugControl` object for writing debug messages.
+
"""
self.collector = collector or 'unknown'
+ self.debug = debug
self.use_file = True
@@ -60,10 +64,6 @@ class CoverageData(object):
#
self.arcs = {}
- self.os = os
- self.sorted = sorted
- self.pickle = pickle
-
def usefile(self, use_file=True):
"""Set whether or not to use a disk file for data."""
self.use_file = use_file
@@ -93,21 +93,21 @@ class CoverageData(object):
def erase(self):
"""Erase the data, both in this object, and from its file storage."""
if self.use_file:
- if self.filename and os.path.exists(self.filename):
- os.remove(self.filename)
+ if self.filename:
+ file_be_gone(self.filename)
self.lines = {}
self.arcs = {}
def line_data(self):
"""Return the map from filenames to lists of line numbers executed."""
return dict(
- [(f, self.sorted(lmap.keys())) for f, lmap in self.lines.items()]
+ [(f, sorted(lmap.keys())) for f, lmap in iitems(self.lines)]
)
def arc_data(self):
"""Return the map from filenames to lists of line number pairs."""
return dict(
- [(f, self.sorted(amap.keys())) for f, amap in self.arcs.items()]
+ [(f, sorted(amap.keys())) for f, amap in iitems(self.arcs)]
)
def write_file(self, filename):
@@ -124,10 +124,13 @@ class CoverageData(object):
if self.collector:
data['collector'] = self.collector
+ if self.debug and self.debug.should('dataio'):
+ self.debug.write("Writing data to %r" % (filename,))
+
# Write the pickle to the file.
fdata = open(filename, 'wb')
try:
- self.pickle.dump(data, fdata, 2)
+ pickle.dump(data, fdata, 2)
finally:
fdata.close()
@@ -137,6 +140,8 @@ class CoverageData(object):
def raw_data(self, filename):
"""Return the raw pickled data from `filename`."""
+ if self.debug and self.debug.should('dataio'):
+ self.debug.write("Reading data from %r" % (filename,))
fdata = open(filename, 'rb')
try:
data = pickle.load(fdata)
@@ -159,12 +164,12 @@ class CoverageData(object):
# Unpack the 'lines' item.
lines = dict([
(f, dict.fromkeys(linenos, None))
- for f, linenos in data.get('lines', {}).items()
+ for f, linenos in iitems(data.get('lines', {}))
])
# Unpack the 'arcs' item.
arcs = dict([
(f, dict.fromkeys(arcpairs, None))
- for f, arcpairs in data.get('arcs', {}).items()
+ for f, arcpairs in iitems(data.get('arcs', {}))
])
except Exception:
pass
@@ -187,10 +192,10 @@ class CoverageData(object):
if f.startswith(localdot):
full_path = os.path.join(data_dir, f)
new_lines, new_arcs = self._read_file(full_path)
- for filename, file_data in new_lines.items():
+ for filename, file_data in iitems(new_lines):
filename = aliases.map(filename)
self.lines.setdefault(filename, {}).update(file_data)
- for filename, file_data in new_arcs.items():
+ for filename, file_data in iitems(new_arcs):
filename = aliases.map(filename)
self.arcs.setdefault(filename, {}).update(file_data)
if f != local:
@@ -202,7 +207,7 @@ class CoverageData(object):
`line_data` is { filename: { lineno: None, ... }, ...}
"""
- for filename, linenos in line_data.items():
+ for filename, linenos in iitems(line_data):
self.lines.setdefault(filename, {}).update(linenos)
def add_arc_data(self, arc_data):
@@ -211,7 +216,7 @@ class CoverageData(object):
`arc_data` is { filename: { (l1,l2): None, ... }, ...}
"""
- for filename, arcs in arc_data.items():
+ for filename, arcs in iitems(arc_data):
self.arcs.setdefault(filename, {}).update(arcs)
def touch_file(self, filename):
@@ -252,8 +257,8 @@ class CoverageData(object):
if fullpath:
filename_fn = lambda f: f
else:
- filename_fn = self.os.path.basename
- for filename, lines in self.lines.items():
+ filename_fn = os.path.basename
+ for filename, lines in iitems(self.lines):
summ[filename_fn(filename)] = len(lines)
return summ
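
CoverageData now accepts a DebugControl object, so data-file reads and writes can be traced with the
'dataio' debug option. A minimal sketch, assuming an empty data object may be round-tripped; the
path below is illustrative:

    import sys
    from coverage.data import CoverageData
    from coverage.debug import DebugControl

    debug = DebugControl(options=["dataio"], output=sys.stderr)
    data = CoverageData(collector="example", debug=debug)

    # Both calls now log "Writing data to ..." / "Reading data from ..." on
    # stderr, because the 'dataio' option is enabled.
    data.write_file("/tmp/.coverage.example")
    print(data.raw_data("/tmp/.coverage.example"))
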
diff --git a/coverage/debug.py b/coverage/debug.py
new file mode 100644
index 00000000..104f3b1d
--- /dev/null
+++ b/coverage/debug.py
@@ -0,0 +1,54 @@
+"""Control of and utilities for debugging."""
+
+import os
+
+
+# When debugging, it can be helpful to force some options, especially when
+# debugging the configuration mechanisms you usually use to control debugging!
+# This is a list of forced debugging options.
+FORCED_DEBUG = []
+
+
+class DebugControl(object):
+ """Control and output for debugging."""
+
+ def __init__(self, options, output):
+ """Configure the options and output file for debugging."""
+ self.options = options
+ self.output = output
+
+ def should(self, option):
+ """Decide whether to output debug information in category `option`."""
+ return (option in self.options or option in FORCED_DEBUG)
+
+ def write(self, msg):
+ """Write a line of debug output."""
+ if self.should('pid'):
+ msg = "pid %5d: %s" % (os.getpid(), msg)
+ self.output.write(msg+"\n")
+ self.output.flush()
+
+ def write_formatted_info(self, info):
+ """Write a sequence of (label,data) pairs nicely."""
+ for line in info_formatter(info):
+ self.write(" %s" % line)
+
+
+def info_formatter(info):
+ """Produce a sequence of formatted lines from info.
+
+ `info` is a sequence of pairs (label, data). The produced lines are
+ nicely formatted, ready to print.
+
+ """
+ label_len = max([len(l) for l, _d in info])
+ for label, data in info:
+ if data == []:
+ data = "-none-"
+ if isinstance(data, (list, tuple)):
+ prefix = "%*s:" % (label_len, label)
+ for e in data:
+ yield "%*s %s" % (label_len+1, prefix, e)
+ prefix = ""
+ else:
+ yield "%*s: %s" % (label_len, label, data)
diff --git a/coverage/execfile.py b/coverage/execfile.py
index 3283a3f7..fbb49b2a 100644
--- a/coverage/execfile.py
+++ b/coverage/execfile.py
@@ -65,6 +65,7 @@ def run_python_module(modulename, args):
openfile.close()
# Finally, hand the file off to run_python_file for execution.
+ pathname = os.path.abspath(pathname)
args[0] = pathname
run_python_file(pathname, args, package=packagename)
@@ -87,14 +88,9 @@ def run_python_file(filename, args, package=None):
main_mod.__package__ = package
main_mod.__builtins__ = BUILTINS
- # Set sys.argv and the first path element properly.
+ # Set sys.argv properly.
old_argv = sys.argv
- old_path0 = sys.path[0]
sys.argv = args
- if package:
- sys.path[0] = ''
- else:
- sys.path[0] = os.path.abspath(os.path.dirname(filename))
try:
# Open the source file.
@@ -110,7 +106,7 @@ def run_python_file(filename, args, package=None):
# We have the source. `compile` still needs the last line to be clean,
# so make sure it is, then compile a code object from it.
- if source[-1] != '\n':
+ if not source or source[-1] != '\n':
source += '\n'
code = compile(source, filename, "exec")
@@ -135,4 +131,3 @@ def run_python_file(filename, args, package=None):
# Restore the old argv and path
sys.argv = old_argv
- sys.path[0] = old_path0
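
run_python_file() now leaves sys.path[0] alone and copes with an empty source file; its calling
convention is unchanged. A hedged sketch (the script name is illustrative), with args[0]
conventionally repeating the script path:

    from coverage.execfile import run_python_file

    # Executes the file as __main__ with sys.argv set to the given args.
    run_python_file("example_script.py", ["example_script.py", "--verbose"])
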
diff --git a/coverage/files.py b/coverage/files.py
index e6dc4aa1..8d154c6f 100644
--- a/coverage/files.py
+++ b/coverage/files.py
@@ -2,23 +2,19 @@
from coverage.backward import to_string
from coverage.misc import CoverageException
-import fnmatch, os, re, sys
+import fnmatch, os, os.path, re, sys
class FileLocator(object):
"""Understand how filenames work."""
def __init__(self):
# The absolute path to our current directory.
- self.relative_dir = self.abs_file(os.curdir) + os.sep
+ self.relative_dir = os.path.normcase(abs_file(os.curdir) + os.sep)
# Cache of results of calling the canonical_filename() method, to
# avoid duplicating work.
self.canonical_filename_cache = {}
- def abs_file(self, filename):
- """Return the absolute normalized form of `filename`."""
- return os.path.normcase(os.path.abspath(os.path.realpath(filename)))
-
def relative_filename(self, filename):
"""Return the relative form of `filename`.
@@ -26,8 +22,9 @@ class FileLocator(object):
`FileLocator` was constructed.
"""
- if filename.startswith(self.relative_dir):
- filename = filename.replace(self.relative_dir, "")
+ fnorm = os.path.normcase(filename)
+ if fnorm.startswith(self.relative_dir):
+ filename = filename[len(self.relative_dir):]
return filename
def canonical_filename(self, filename):
@@ -37,19 +34,15 @@ class FileLocator(object):
"""
if filename not in self.canonical_filename_cache:
- f = filename
- if os.path.isabs(f) and not os.path.exists(f):
- if self.get_zip_data(f) is None:
- f = os.path.basename(f)
- if not os.path.isabs(f):
+ if not os.path.isabs(filename):
for path in [os.curdir] + sys.path:
if path is None:
continue
- g = os.path.join(path, f)
- if os.path.exists(g):
- f = g
+ f = os.path.join(path, filename)
+ if os.path.exists(f):
+ filename = f
break
- cf = self.abs_file(f)
+ cf = abs_file(filename)
self.canonical_filename_cache[filename] = cf
return self.canonical_filename_cache[filename]
@@ -78,6 +71,71 @@ class FileLocator(object):
return None
+if sys.platform == 'win32':
+
+ def actual_path(path):
+ """Get the actual path of `path`, including the correct case."""
+ if path in actual_path.cache:
+ return actual_path.cache[path]
+
+ head, tail = os.path.split(path)
+ if not tail:
+ actpath = head
+ elif not head:
+ actpath = tail
+ else:
+ head = actual_path(head)
+ if head in actual_path.list_cache:
+ files = actual_path.list_cache[head]
+ else:
+ try:
+ files = os.listdir(head)
+ except OSError:
+ files = []
+ actual_path.list_cache[head] = files
+ normtail = os.path.normcase(tail)
+ for f in files:
+ if os.path.normcase(f) == normtail:
+ tail = f
+ break
+ actpath = os.path.join(head, tail)
+ actual_path.cache[path] = actpath
+ return actpath
+
+ actual_path.cache = {}
+ actual_path.list_cache = {}
+
+else:
+ def actual_path(filename):
+ """The actual path for non-Windows platforms."""
+ return filename
+
+def abs_file(filename):
+ """Return the absolute normalized form of `filename`."""
+ path = os.path.abspath(os.path.realpath(filename))
+ path = actual_path(path)
+ return path
+
+
+def prep_patterns(patterns):
+ """Prepare the file patterns for use in a `FnmatchMatcher`.
+
+ If a pattern starts with a wildcard, it is used as a pattern
+ as-is. If it does not start with a wildcard, then it is made
+ absolute with the current directory.
+
+ If `patterns` is None, an empty list is returned.
+
+ """
+ prepped = []
+ for p in patterns or []:
+ if p.startswith("*") or p.startswith("?"):
+ prepped.append(p)
+ else:
+ prepped.append(abs_file(p))
+ return prepped
+
+
class TreeMatcher(object):
"""A matcher for files in a tree."""
def __init__(self, directories):
@@ -86,6 +144,10 @@ class TreeMatcher(object):
def __repr__(self):
return "<TreeMatcher %r>" % self.dirs
+ def info(self):
+ """A list of strings for displaying when dumping state."""
+ return self.dirs
+
def add(self, directory):
"""Add another directory to the list we match for."""
self.dirs.append(directory)
@@ -111,6 +173,10 @@ class FnmatchMatcher(object):
def __repr__(self):
return "<FnmatchMatcher %r>" % self.pats
+ def info(self):
+ """A list of strings for displaying when dumping state."""
+ return self.pats
+
def match(self, fpath):
"""Does `fpath` match one of our filename patterns?"""
for pat in self.pats:
@@ -175,7 +241,7 @@ class PathAliases(object):
# either separator.
regex_pat = regex_pat.replace(r"\/", r"[\\/]")
# We want case-insensitive matching, so add that flag.
- regex = re.compile("(?i)" + regex_pat)
+ regex = re.compile(r"(?i)" + regex_pat)
# Normalize the result: it must end with a path separator.
result_sep = sep(result)
@@ -207,9 +273,17 @@ class PathAliases(object):
def find_python_files(dirname):
- """Yield all of the importable Python files in `dirname`, recursively."""
- for dirpath, dirnames, filenames in os.walk(dirname, topdown=True):
- if '__init__.py' not in filenames:
+ """Yield all of the importable Python files in `dirname`, recursively.
+
+    To be importable, the files have to be in a directory with an __init__.py,
+ except for `dirname` itself, which isn't required to have one. The
+ assumption is that `dirname` was specified directly, so the user knows
+    best, but subdirectories are checked for an __init__.py to be sure we only
+ find the importable files.
+
+ """
+ for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
+ if i > 0 and '__init__.py' not in filenames:
# If a directory doesn't have __init__.py, then it isn't
# importable and neither are its files
del dirnames[:]
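
prep_patterns() keeps wildcard-leading patterns as-is and absolutizes the rest, so relative
include/omit values keep matching regardless of the current directory. A sketch of feeding the
prepared list to a matcher (assuming FnmatchMatcher is constructed from a pattern list, as its
`pats` attribute suggests; the paths are illustrative):

    from coverage.files import FnmatchMatcher, abs_file, prep_patterns

    pats = prep_patterns(["*/tests/*", "src/pkg/util.py"])
    # "*/tests/*" survives untouched; "src/pkg/util.py" becomes absolute.

    matcher = FnmatchMatcher(pats)
    print(matcher.info())                              # the pattern list
    print(matcher.match(abs_file("src/pkg/util.py")))  # True
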
diff --git a/coverage/fullcoverage/encodings.py b/coverage/fullcoverage/encodings.py
index 9409b7d7..6a258d67 100644
--- a/coverage/fullcoverage/encodings.py
+++ b/coverage/fullcoverage/encodings.py
@@ -37,14 +37,21 @@ class FullCoverageTracer(object):
sys.settrace(FullCoverageTracer().fullcoverage_trace)
+# In coverage/files.py is actual_filename(), which uses glob.glob. I don't
+# understand why, but that use of glob borks everything if fullcoverage is in
+# effect. So here we make an ugly hail-mary pass to switch off glob.glob over
+# there. This means when using fullcoverage, Windows path names will not be
+# their actual case.
+
+#sys.fullcoverage = True
+
# Finally, remove our own directory from sys.path; remove ourselves from
# sys.modules; and re-import "encodings", which will be the real package
# this time. Note that the delete from sys.modules dictionary has to
# happen last, since all of the symbols in this module will become None
# at that exact moment, including "sys".
-parentdirs = [ d for d in sys.path if __file__.startswith(d) ]
-parentdirs.sort(key=len)
-sys.path.remove(parentdirs[-1])
+parentdir = max(filter(__file__.startswith, sys.path), key=len)
+sys.path.remove(parentdir)
del sys.modules['encodings']
import encodings
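
The replacement one-liner picks the longest sys.path entry that is a prefix of __file__, i.e. the
fullcoverage directory itself. Worked out on made-up paths:

    file_path = "/proj/coverage/fullcoverage/encodings.py"
    paths = ["/proj", "/proj/coverage/fullcoverage", "/usr/lib/python2.7"]
    print(max(filter(file_path.startswith, paths), key=len))
    # -> /proj/coverage/fullcoverage
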
diff --git a/coverage/html.py b/coverage/html.py
index b0eff5f2..ed8920f2 100644
--- a/coverage/html.py
+++ b/coverage/html.py
@@ -7,6 +7,7 @@ from coverage.backward import pickle
from coverage.misc import CoverageException, Hasher
from coverage.phystokens import source_token_lines, source_encoding
from coverage.report import Reporter
+from coverage.results import Numbers
from coverage.templite import Templite
# Disable pylint msg W0612, because a bunch of variables look unused, but
@@ -46,6 +47,7 @@ class HtmlReporter(Reporter):
self.directory = None
self.template_globals = {
'escape': escape,
+ 'title': self.config.html_title,
'__url__': coverage.__url__,
'__version__': coverage.__version__,
}
@@ -59,6 +61,7 @@ class HtmlReporter(Reporter):
self.arcs = self.coverage.data.has_arcs()
self.status = HtmlStatus()
self.extra_css = None
+ self.totals = Numbers()
def report(self, morfs):
"""Generate an HTML report for `morfs`.
@@ -94,6 +97,8 @@ class HtmlReporter(Reporter):
self.make_local_static_report_files()
+ return self.totals.pc_covered
+
def make_local_static_report_files(self):
"""Make local instances of static files for HTML report."""
# The files we provide must always be copied.
@@ -157,7 +162,6 @@ class HtmlReporter(Reporter):
nums = analysis.numbers
missing_branch_arcs = analysis.missing_branch_arcs()
- n_par = 0 # accumulated below.
arcs = self.arcs
# These classes determine which lines are highlighted by default.
@@ -182,7 +186,6 @@ class HtmlReporter(Reporter):
line_class.append(c_mis)
elif self.arcs and lineno in missing_branch_arcs:
line_class.append(c_par)
- n_par += 1
annlines = []
for b in missing_branch_arcs[lineno]:
if b < 0:
@@ -229,7 +232,6 @@ class HtmlReporter(Reporter):
# Save this file's information for the index file.
index_info = {
'nums': nums,
- 'par': n_par,
'html_filename': html_filename,
'name': cu.name,
}
@@ -245,12 +247,15 @@ class HtmlReporter(Reporter):
files = self.files
arcs = self.arcs
- totals = sum([f['nums'] for f in files])
+ self.totals = totals = sum([f['nums'] for f in files])
extra_css = self.extra_css
+ html = index_tmpl.render(locals())
+ if sys.version_info < (3, 0):
+ html = html.decode("utf-8")
self.write_html(
os.path.join(self.directory, "index.html"),
- index_tmpl.render(locals())
+ html
)
# Write the latest hashes for next time.
@@ -281,7 +286,7 @@ class HtmlStatus(object):
status = pickle.load(fstatus)
finally:
fstatus.close()
- except IOError:
+ except (IOError, ValueError):
usable = False
else:
usable = True
@@ -358,5 +363,5 @@ def spaceless(html):
Get rid of some.
"""
- html = re.sub(">\s+<p ", ">\n<p ", html)
+ html = re.sub(r">\s+<p ", ">\n<p ", html)
return html
diff --git a/coverage/htmlfiles/coverage_html.js b/coverage/htmlfiles/coverage_html.js
index 5906e653..b24006d2 100644
--- a/coverage/htmlfiles/coverage_html.js
+++ b/coverage/htmlfiles/coverage_html.js
@@ -374,4 +374,3 @@ coverage.scroll_window = function (to_pos) {
coverage.finish_scrolling = function () {
$("html,body").stop(true, true);
};
-
diff --git a/coverage/htmlfiles/index.html b/coverage/htmlfiles/index.html
index c6d9eec0..c649a83c 100644
--- a/coverage/htmlfiles/index.html
+++ b/coverage/htmlfiles/index.html
@@ -2,7 +2,7 @@
<html>
<head>
<meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
- <title>Coverage report</title>
+ <title>{{ title|escape }}</title>
<link rel='stylesheet' href='style.css' type='text/css'>
{% if extra_css %}
<link rel='stylesheet' href='{{ extra_css }}' type='text/css'>
@@ -19,7 +19,7 @@
<div id='header'>
<div class='content'>
- <h1>Coverage report:
+ <h1>{{ title|escape }}:
<span class='pc_cov'>{{totals.pc_covered_str}}%</span>
</h1>
<img id='keyboard_icon' src='keybd_closed.png'>
@@ -69,7 +69,7 @@
<td>{{totals.n_excluded}}</td>
{% if arcs %}
<td>{{totals.n_branches}}</td>
- <td>{{totals.n_missing_branches}}</td>
+ <td>{{totals.n_partial_branches}}</td>
{% endif %}
<td class='right'>{{totals.pc_covered_str}}%</td>
</tr>
@@ -83,7 +83,7 @@
<td>{{file.nums.n_excluded}}</td>
{% if arcs %}
<td>{{file.nums.n_branches}}</td>
- <td>{{file.nums.n_missing_branches}}</td>
+ <td>{{file.nums.n_partial_branches}}</td>
{% endif %}
<td class='right'>{{file.nums.pc_covered_str}}%</td>
</tr>
diff --git a/coverage/htmlfiles/pyfile.html b/coverage/htmlfiles/pyfile.html
index 490fad86..525939f8 100644
--- a/coverage/htmlfiles/pyfile.html
+++ b/coverage/htmlfiles/pyfile.html
@@ -32,7 +32,7 @@
<span class='{{c_mis}} shortkey_m button_toggle_mis'>{{nums.n_missing}} missing</span>
<span class='{{c_exc}} shortkey_x button_toggle_exc'>{{nums.n_excluded}} excluded</span>
{% if arcs %}
- <span class='{{c_par}} shortkey_p button_toggle_par'>{{n_par}} partial</span>
+ <span class='{{c_par}} shortkey_p button_toggle_par'>{{nums.n_partial_branches}} partial</span>
{% endif %}
</h2>
</div>
diff --git a/coverage/htmlfiles/style.css b/coverage/htmlfiles/style.css
index 70715ac1..811c6401 100644
--- a/coverage/htmlfiles/style.css
+++ b/coverage/htmlfiles/style.css
@@ -24,8 +24,8 @@ html>body {
/* Set base font size to 12/16 */
p {
- font-size: .75em; /* 12/16 */
- line-height: 1.3333em; /* 16/12 */
+ font-size: .75em; /* 12/16 */
+ line-height: 1.33333333em; /* 16/12 */
}
table {
diff --git a/coverage/misc.py b/coverage/misc.py
index fd9be857..473d7d43 100644
--- a/coverage/misc.py
+++ b/coverage/misc.py
@@ -1,6 +1,10 @@
"""Miscellaneous stuff for Coverage."""
+import errno
import inspect
+import os
+import sys
+
from coverage.backward import md5, sorted # pylint: disable=W0622
from coverage.backward import string_class, to_bytes
@@ -50,6 +54,12 @@ def format_lines(statements, lines):
return ret
+def short_stack():
+ """Return a string summarizing the call stack."""
+ stack = inspect.stack()[:0:-1]
+ return "\n".join(["%30s : %s @%d" % (t[3],t[1],t[2]) for t in stack])
+
+
def expensive(fn):
"""A decorator to cache the result of an expensive operation.
@@ -76,13 +86,23 @@ def bool_or_none(b):
def join_regex(regexes):
"""Combine a list of regexes into one that matches any of them."""
if len(regexes) > 1:
- return "(" + ")|(".join(regexes) + ")"
+ return "|".join(["(%s)" % r for r in regexes])
elif regexes:
return regexes[0]
else:
return ""
+def file_be_gone(path):
+ """Remove a file, and don't get annoyed if it doesn't exist."""
+ try:
+ os.remove(path)
+ except OSError:
+ _, e, _ = sys.exc_info()
+ if e.errno != errno.ENOENT:
+ raise
+
+
class Hasher(object):
"""Hashes Python data into md5."""
def __init__(self):
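
Two helpers worth noting above: join_regex() now wraps each alternative in its own group before
joining with "|", and file_be_gone() swallows only the file-not-found error. An illustrative check:

    from coverage.misc import file_be_gone, join_regex

    print(join_regex([r"#\s*pragma", "spam"]))   # (#\s*pragma)|(spam)
    print(join_regex(["only"]))                  # only
    print(join_regex([]))                        # (empty string)

    file_be_gone("/tmp/does-not-exist.really")   # silently does nothing
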
diff --git a/coverage/parser.py b/coverage/parser.py
index f868d357..2d777a5d 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -1,9 +1,10 @@
"""Code parsing for Coverage."""
-import opcode, re, sys, token, tokenize
+import dis, re, sys, token, tokenize
from coverage.backward import set, sorted, StringIO # pylint: disable=W0622
-from coverage.backward import open_source
+from coverage.backward import open_source, range # pylint: disable=W0622
+from coverage.backward import reversed # pylint: disable=W0622
from coverage.bytecode import ByteCodes, CodeObjects
from coverage.misc import nice_pair, expensive, join_regex
from coverage.misc import CoverageException, NoSource, NotPython
@@ -32,7 +33,7 @@ class CodeParser(object):
except IOError:
_, err, _ = sys.exc_info()
raise NoSource(
- "No source for code: %r: %s" % (self.filename, err)
+ "No source for code: '%s': %s" % (self.filename, err)
)
# Scrap the BOM if it exists.
@@ -108,7 +109,7 @@ class CodeParser(object):
tokgen = tokenize.generate_tokens(StringIO(self.text).readline)
for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
- if self.show_tokens: # pragma: no cover
+ if self.show_tokens: # pragma: not covered
print("%10s %5s %-20r %r" % (
tokenize.tok_name.get(toktype, toktype),
nice_pair((slineno, elineno)), ttext, ltext
@@ -134,8 +135,7 @@ class CodeParser(object):
# (a trick from trace.py in the stdlib.) This works for
# 99.9999% of cases. For the rest (!) see:
# http://stackoverflow.com/questions/1769332/x/1769794#1769794
- for i in range(slineno, elineno+1):
- self.docstrings.add(i)
+ self.docstrings.update(range(slineno, elineno+1))
elif toktype == token.NEWLINE:
if first_line is not None and elineno != first_line:
# We're at the end of a line, and we've ended on a
@@ -203,7 +203,15 @@ class CodeParser(object):
statements.
"""
- self._raw_parse()
+ try:
+ self._raw_parse()
+ except (tokenize.TokenError, IndentationError):
+ _, tokerr, _ = sys.exc_info()
+ msg, lineno = tokerr.args
+ raise NotPython(
+ "Couldn't parse '%s' as Python source: '%s' at %s" %
+ (self.filename, msg, lineno)
+ )
excluded_lines = self.first_lines(self.excluded)
ignore = excluded_lines + list(self.docstrings)
@@ -262,8 +270,8 @@ class CodeParser(object):
## Opcodes that guide the ByteParser.
def _opcode(name):
- """Return the opcode by name from the opcode module."""
- return opcode.opmap[name]
+ """Return the opcode by name from the dis module."""
+ return dis.opmap[name]
def _opcode_set(*names):
"""Return a set of opcodes by the names in `names`."""
@@ -362,52 +370,60 @@ class ByteParser(object):
# Getting numbers from the lnotab value changed in Py3.0.
if sys.version_info >= (3, 0):
def _lnotab_increments(self, lnotab):
- """Return a list of ints from the lnotab bytes in 3.x"""
- return list(lnotab)
+ """Produce ints from the lnotab bytes in 3.x"""
+ # co_lnotab is a bytes object, which iterates as ints.
+ return lnotab
else:
def _lnotab_increments(self, lnotab):
- """Return a list of ints from the lnotab string in 2.x"""
- return [ord(c) for c in lnotab]
+ """Produce ints from the lnotab string in 2.x"""
+ for c in lnotab:
+ yield ord(c)
def _bytes_lines(self):
"""Map byte offsets to line numbers in `code`.
Uses co_lnotab described in Python/compile.c to map byte offsets to
- line numbers. Returns a list: [(b0, l0), (b1, l1), ...]
+ line numbers. Produces a sequence: (b0, l0), (b1, l1), ...
+
+ Only byte offsets that correspond to line numbers are included in the
+ results.
"""
# Adapted from dis.py in the standard library.
byte_increments = self._lnotab_increments(self.code.co_lnotab[0::2])
line_increments = self._lnotab_increments(self.code.co_lnotab[1::2])
- bytes_lines = []
last_line_num = None
line_num = self.code.co_firstlineno
byte_num = 0
for byte_incr, line_incr in zip(byte_increments, line_increments):
if byte_incr:
if line_num != last_line_num:
- bytes_lines.append((byte_num, line_num))
+ yield (byte_num, line_num)
last_line_num = line_num
byte_num += byte_incr
line_num += line_incr
if line_num != last_line_num:
- bytes_lines.append((byte_num, line_num))
- return bytes_lines
+ yield (byte_num, line_num)
def _find_statements(self):
"""Find the statements in `self.code`.
- Return a set of line numbers that start statements. Recurses into all
- code objects reachable from `self.code`.
+ Produce a sequence of line numbers that start statements. Recurses
+ into all code objects reachable from `self.code`.
"""
- stmts = set()
for bp in self.child_parsers():
# Get all of the lineno information from this code.
for _, l in bp._bytes_lines():
- stmts.add(l)
- return stmts
+ yield l
+
+ def _block_stack_repr(self, block_stack):
+ """Get a string version of `block_stack`, for debugging."""
+ blocks = ", ".join(
+ ["(%s, %r)" % (dis.opname[b[0]], b[1]) for b in block_stack]
+ )
+ return "[" + blocks + "]"
def _split_into_chunks(self):
"""Split the code object into a list of `Chunk` objects.
@@ -418,10 +434,11 @@ class ByteParser(object):
Returns a list of `Chunk` objects.
"""
-
# The list of chunks so far, and the one we're working on.
chunks = []
chunk = None
+
+ # A dict mapping byte offsets of line starts to the line numbers.
bytes_lines_map = dict(self._bytes_lines())
# The block stack: loops and try blocks get pushed here for the
@@ -436,24 +453,37 @@ class ByteParser(object):
# We have to handle the last two bytecodes specially.
ult = penult = None
+ # Get a set of all of the jump-to points.
+ jump_to = set()
+ for bc in ByteCodes(self.code.co_code):
+ if bc.jump_to >= 0:
+ jump_to.add(bc.jump_to)
+
+ chunk_lineno = 0
+
+ # Walk the byte codes building chunks.
for bc in ByteCodes(self.code.co_code):
# Maybe have to start a new chunk
+ start_new_chunk = False
+ first_chunk = False
if bc.offset in bytes_lines_map:
# Start a new chunk for each source line number.
- if chunk:
- chunk.exits.add(bc.offset)
- chunk = Chunk(bc.offset, bytes_lines_map[bc.offset])
- chunks.append(chunk)
+ start_new_chunk = True
+ chunk_lineno = bytes_lines_map[bc.offset]
+ first_chunk = True
+ elif bc.offset in jump_to:
+ # To make chunks have a single entrance, we have to make a new
+ # chunk when we get to a place some bytecode jumps to.
+ start_new_chunk = True
elif bc.op in OPS_CHUNK_BEGIN:
# Jumps deserve their own unnumbered chunk. This fixes
# problems with jumps to jumps getting confused.
+ start_new_chunk = True
+
+ if not chunk or start_new_chunk:
if chunk:
chunk.exits.add(bc.offset)
- chunk = Chunk(bc.offset)
- chunks.append(chunk)
-
- if not chunk:
- chunk = Chunk(bc.offset)
+ chunk = Chunk(bc.offset, chunk_lineno, first_chunk)
chunks.append(chunk)
# Look at the opcode
@@ -482,15 +512,11 @@ class ByteParser(object):
chunk.exits.add(block_stack[-1][1])
chunk = None
if bc.op == OP_END_FINALLY:
- if block_stack:
- # A break that goes through a finally will jump to whatever
- # block is on top of the stack.
- chunk.exits.add(block_stack[-1][1])
# For the finally clause we need to find the closest exception
# block, and use its jump target as an exit.
- for iblock in range(len(block_stack)-1, -1, -1):
- if block_stack[iblock][0] in OPS_EXCEPT_BLOCKS:
- chunk.exits.add(block_stack[iblock][1])
+ for block in reversed(block_stack):
+ if block[0] in OPS_EXCEPT_BLOCKS:
+ chunk.exits.add(block[1])
break
if bc.op == OP_COMPARE_OP and bc.arg == COMPARE_EXCEPTION:
# This is an except clause. We want to overlook the next
@@ -516,23 +542,33 @@ class ByteParser(object):
last_chunk = chunks[-1]
last_chunk.exits.remove(ex)
last_chunk.exits.add(penult.offset)
- chunk = Chunk(penult.offset)
+ chunk = Chunk(
+ penult.offset, last_chunk.line, False
+ )
chunk.exits.add(ex)
chunks.append(chunk)
# Give all the chunks a length.
- chunks[-1].length = bc.next_offset - chunks[-1].byte
+ chunks[-1].length = bc.next_offset - chunks[-1].byte # pylint: disable=W0631,C0301
for i in range(len(chunks)-1):
chunks[i].length = chunks[i+1].byte - chunks[i].byte
+ #self.validate_chunks(chunks)
return chunks
+ def validate_chunks(self, chunks):
+ """Validate the rule that chunks have a single entrance."""
+ # starts is the entrances to the chunks
+ starts = set([ch.byte for ch in chunks])
+ for ch in chunks:
+ assert all((ex in starts or ex < 0) for ex in ch.exits)
+
def _arcs(self):
"""Find the executable arcs in the code.
- Returns a set of pairs, (from,to). From and to are integer line
- numbers. If from is < 0, then the arc is an entrance into the code
- object. If to is < 0, the arc is an exit from the code object.
+ Yields pairs: (from,to). From and to are integer line numbers. If
+ from is < 0, then the arc is an entrance into the code object. If to
+ is < 0, the arc is an exit from the code object.
"""
chunks = self._split_into_chunks()
@@ -540,65 +576,43 @@ class ByteParser(object):
# A map from byte offsets to chunks jumped into.
byte_chunks = dict([(c.byte, c) for c in chunks])
- # Build a map from byte offsets to actual lines reached.
- byte_lines = {}
- bytes_to_add = set([c.byte for c in chunks])
+ # There's always an entrance at the first chunk.
+ yield (-1, byte_chunks[0].line)
- while bytes_to_add:
- byte_to_add = bytes_to_add.pop()
- if byte_to_add in byte_lines or byte_to_add < 0:
+ # Traverse from the first chunk in each line, and yield arcs where
+ # the trace function will be invoked.
+ for chunk in chunks:
+ if not chunk.first:
continue
- # Which lines does this chunk lead to?
- bytes_considered = set()
- bytes_to_consider = [byte_to_add]
- lines = set()
-
- while bytes_to_consider:
- byte = bytes_to_consider.pop()
- bytes_considered.add(byte)
-
- # Find chunk for byte
- try:
- ch = byte_chunks[byte]
- except KeyError:
- for ch in chunks:
- if ch.byte <= byte < ch.byte+ch.length:
- break
- else:
- # No chunk for this byte!
- raise Exception("Couldn't find chunk @ %d" % byte)
- byte_chunks[byte] = ch
-
- if ch.line:
- lines.add(ch.line)
- else:
- for ex in ch.exits:
- if ex < 0:
- lines.add(ex)
- elif ex not in bytes_considered:
- bytes_to_consider.append(ex)
-
- bytes_to_add.update(ch.exits)
-
- byte_lines[byte_to_add] = lines
-
- # Figure out for each chunk where the exits go.
- arcs = set()
- for chunk in chunks:
- if chunk.line:
- for ex in chunk.exits:
+ chunks_considered = set()
+ chunks_to_consider = [chunk]
+ while chunks_to_consider:
+ # Get the chunk we're considering, and make sure we don't
+ # consider it again
+ this_chunk = chunks_to_consider.pop()
+ chunks_considered.add(this_chunk)
+
+ # For each exit, add the line number if the trace function
+ # would be triggered, or add the chunk to those being
+ # considered if not.
+ for ex in this_chunk.exits:
if ex < 0:
- exit_lines = [ex]
+ yield (chunk.line, ex)
else:
- exit_lines = byte_lines[ex]
- for exit_line in exit_lines:
- if chunk.line != exit_line:
- arcs.add((chunk.line, exit_line))
- for line in byte_lines[0]:
- arcs.add((-1, line))
-
- return arcs
+ next_chunk = byte_chunks[ex]
+ if next_chunk in chunks_considered:
+ continue
+
+ # The trace function is invoked if visiting the first
+ # bytecode in a line, or if the transition is a
+ # backward jump.
+ backward_jump = next_chunk.byte < this_chunk.byte
+ if next_chunk.first or backward_jump:
+ if next_chunk.line != chunk.line:
+ yield (chunk.line, next_chunk.line)
+ else:
+ chunks_to_consider.append(next_chunk)
def _all_chunks(self):
"""Returns a list of `Chunk` objects for this code and its children.
@@ -626,11 +640,11 @@ class ByteParser(object):
class Chunk(object):
- """A sequence of bytecodes with a single entrance.
+ """A sequence of byte codes with a single entrance.
To analyze byte code, we have to divide it into chunks, sequences of byte
- codes such that each basic block has only one entrance, the first
- instruction in the block.
+    codes such that each chunk has only one entrance, the first instruction in
+    the chunk.
This is almost the CS concept of `basic block`_, except that we're willing
to have many exits from a chunk, and "basic block" is a more cumbersome
@@ -638,17 +652,26 @@ class Chunk(object):
.. _basic block: http://en.wikipedia.org/wiki/Basic_block
+ `line` is the source line number containing this chunk.
+
+ `first` is true if this is the first chunk in the source line.
+
An exit < 0 means the chunk can leave the code (return). The exit is
the negative of the starting line number of the code block.
"""
- def __init__(self, byte, line=0):
+ def __init__(self, byte, line, first):
self.byte = byte
self.line = line
+ self.first = first
self.length = 0
self.exits = set()
def __repr__(self):
- return "<%d+%d @%d %r>" % (
- self.byte, self.length, self.line, list(self.exits)
+ if self.first:
+ bang = "!"
+ else:
+ bang = ""
+ return "<%d+%d @%d%s %r>" % (
+ self.byte, self.length, self.line, bang, list(self.exits)
)
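
Chunk objects now carry their source line and a `first` flag, and the repr marks first-in-line
chunks with a "!". A tiny sketch of the new constructor (offsets and line numbers are made up):

    from coverage.parser import Chunk

    chunk = Chunk(0, 1, True)    # byte offset 0, line 1, first chunk of the line
    chunk.exits.add(8)
    print(chunk)                 # <0+0 @1! [8]>
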
diff --git a/coverage/phystokens.py b/coverage/phystokens.py
index 3beebab1..166020e1 100644
--- a/coverage/phystokens.py
+++ b/coverage/phystokens.py
@@ -119,7 +119,7 @@ def source_encoding(source):
# This is mostly code adapted from Py3.2's tokenize module.
- cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
+ cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
# Do this so the detect_encode code we copied will work.
readline = iter(source.splitlines()).next
diff --git a/coverage/report.py b/coverage/report.py
index e351340f..34f44422 100644
--- a/coverage/report.py
+++ b/coverage/report.py
@@ -2,6 +2,7 @@
import fnmatch, os
from coverage.codeunit import code_unit_factory
+from coverage.files import prep_patterns
from coverage.misc import CoverageException, NoSource, NotPython
class Reporter(object):
@@ -35,7 +36,7 @@ class Reporter(object):
self.code_units = code_unit_factory(morfs, file_locator)
if self.config.include:
- patterns = [file_locator.abs_file(p) for p in self.config.include]
+ patterns = prep_patterns(self.config.include)
filtered = []
for cu in self.code_units:
for pattern in patterns:
@@ -45,7 +46,7 @@ class Reporter(object):
self.code_units = filtered
if self.config.omit:
- patterns = [file_locator.abs_file(p) for p in self.config.omit]
+ patterns = prep_patterns(self.config.omit)
filtered = []
for cu in self.code_units:
for pattern in patterns:
diff --git a/coverage/results.py b/coverage/results.py
index d7e2a9d1..77ff2a2d 100644
--- a/coverage/results.py
+++ b/coverage/results.py
@@ -2,7 +2,7 @@
import os
-from coverage.backward import set, sorted # pylint: disable=W0622
+from coverage.backward import iitems, set, sorted # pylint: disable=W0622
from coverage.misc import format_lines, join_regex, NoSource
from coverage.parser import CodeParser
@@ -21,7 +21,7 @@ class Analysis(object):
if not os.path.exists(self.filename):
source = self.coverage.file_locator.get_zip_data(self.filename)
if not source:
- raise NoSource("No source for code: %r" % self.filename)
+ raise NoSource("No source for code: '%s'" % self.filename)
self.parser = CodeParser(
text=source, filename=self.filename,
@@ -41,11 +41,12 @@ class Analysis(object):
)
n_branches = self.total_branches()
mba = self.missing_branch_arcs()
- n_missing_branches = sum(
- [len(v) for k,v in mba.items() if k not in self.missing]
+ n_partial_branches = sum(
+ [len(v) for k,v in iitems(mba) if k not in self.missing]
)
+ n_missing_branches = sum([len(v) for k,v in iitems(mba)])
else:
- n_branches = n_missing_branches = 0
+ n_branches = n_partial_branches = n_missing_branches = 0
self.no_branch = set()
self.numbers = Numbers(
@@ -54,6 +55,7 @@ class Analysis(object):
n_excluded=len(self.excluded),
n_missing=len(self.missing),
n_branches=n_branches,
+ n_partial_branches=n_partial_branches,
n_missing_branches=n_missing_branches,
)
@@ -109,7 +111,7 @@ class Analysis(object):
def branch_lines(self):
"""Returns a list of line numbers that have more than one exit."""
exit_counts = self.parser.exit_counts()
- return [l1 for l1,count in exit_counts.items() if count > 1]
+ return [l1 for l1,count in iitems(exit_counts) if count > 1]
def total_branches(self):
"""How many total branches are there?"""
@@ -166,13 +168,14 @@ class Numbers(object):
_near100 = 99.0
def __init__(self, n_files=0, n_statements=0, n_excluded=0, n_missing=0,
- n_branches=0, n_missing_branches=0
+ n_branches=0, n_partial_branches=0, n_missing_branches=0
):
self.n_files = n_files
self.n_statements = n_statements
self.n_excluded = n_excluded
self.n_missing = n_missing
self.n_branches = n_branches
+ self.n_partial_branches = n_partial_branches
self.n_missing_branches = n_missing_branches
def set_precision(cls, precision):
@@ -236,8 +239,12 @@ class Numbers(object):
nums.n_excluded = self.n_excluded + other.n_excluded
nums.n_missing = self.n_missing + other.n_missing
nums.n_branches = self.n_branches + other.n_branches
- nums.n_missing_branches = (self.n_missing_branches +
- other.n_missing_branches)
+ nums.n_partial_branches = (
+ self.n_partial_branches + other.n_partial_branches
+ )
+ nums.n_missing_branches = (
+ self.n_missing_branches + other.n_missing_branches
+ )
return nums
def __radd__(self, other):
diff --git a/coverage/summary.py b/coverage/summary.py
index c8fa5be4..c99c5303 100644
--- a/coverage/summary.py
+++ b/coverage/summary.py
@@ -29,7 +29,7 @@ class SummaryReporter(Reporter):
header = (fmt_name % "Name") + " Stmts Miss"
fmt_coverage = fmt_name + "%6d %6d"
if self.branches:
- header += " Branch BrPart"
+ header += " Branch BrMiss"
fmt_coverage += " %6d %6d"
width100 = Numbers.pc_str_width()
header += "%*s" % (width100+4, "Cover")
@@ -62,7 +62,7 @@ class SummaryReporter(Reporter):
args += (analysis.missing_formatted(),)
outfile.write(fmt_coverage % args)
total += nums
- except KeyboardInterrupt: # pragma: no cover
+ except KeyboardInterrupt: # pragma: not covered
raise
except:
report_it = not self.config.ignore_errors
@@ -82,3 +82,5 @@ class SummaryReporter(Reporter):
if self.config.show_missing:
args += ("",)
outfile.write(fmt_coverage % args)
+
+ return total.pc_covered
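
Like the HTML and XML reporters above, the summary reporter now returns the total percentage.
Assuming the top-level report() call forwards that return value, a caller could gate on it like
this (the 80% threshold is the caller's own, not part of the patch):

    import sys
    import coverage

    cov = coverage.coverage()
    cov.start()
    # ... run the code under test ...
    cov.stop()

    pct = cov.report()
    if pct < 80:
        sys.exit("coverage %.1f%% is below the 80%% target" % pct)
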
diff --git a/coverage/tracer.c b/coverage/tracer.c
index 3bed5f80..97dd113b 100644
--- a/coverage/tracer.c
+++ b/coverage/tracer.c
@@ -27,7 +27,8 @@
#define MyText_Type PyUnicode_Type
#define MyText_Check(o) PyUnicode_Check(o)
-#define MyText_AS_STRING(o) PyBytes_AS_STRING(PyUnicode_AsASCIIString(o))
+#define MyText_AS_BYTES(o) PyUnicode_AsASCIIString(o)
+#define MyText_AS_STRING(o) PyBytes_AS_STRING(o)
#define MyInt_FromLong(l) PyLong_FromLong(l)
#define MyType_HEAD_INIT PyVarObject_HEAD_INIT(NULL, 0)
@@ -36,6 +37,7 @@
#define MyText_Type PyString_Type
#define MyText_Check(o) PyString_Check(o)
+#define MyText_AS_BYTES(o) (Py_INCREF(o), o)
#define MyText_AS_STRING(o) PyString_AS_STRING(o)
#define MyInt_FromLong(l) PyInt_FromLong(l)
@@ -207,7 +209,9 @@ showlog(int depth, int lineno, PyObject * filename, const char * msg)
printf(" ");
}
if (filename) {
- printf(" %s", MyText_AS_STRING(filename));
+ PyObject *ascii = MyText_AS_BYTES(filename);
+ printf(" %s", MyText_AS_STRING(ascii));
+ Py_DECREF(ascii);
}
if (msg) {
printf(" %s", msg);
@@ -231,10 +235,8 @@ CTracer_record_pair(CTracer *self, int l1, int l2)
{
int ret = RET_OK;
- PyObject * t = PyTuple_New(2);
+ PyObject * t = Py_BuildValue("(ii)", l1, l2);
if (t != NULL) {
- PyTuple_SET_ITEM(t, 0, MyInt_FromLong(l1));
- PyTuple_SET_ITEM(t, 1, MyInt_FromLong(l2));
if (PyDict_SetItem(self->cur_file_data, t, Py_None) < 0) {
STATS( self->stats.errors++; )
ret = RET_ERROR;
@@ -257,17 +259,24 @@ CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unuse
int ret = RET_OK;
PyObject * filename = NULL;
PyObject * tracename = NULL;
+ #if WHAT_LOG || TRACE_LOG
+ PyObject * ascii = NULL;
+ #endif
#if WHAT_LOG
if (what <= sizeof(what_sym)/sizeof(const char *)) {
- printf("trace: %s @ %s %d\n", what_sym[what], MyText_AS_STRING(frame->f_code->co_filename), frame->f_lineno);
+ ascii = MyText_AS_BYTES(frame->f_code->co_filename);
+ printf("trace: %s @ %s %d\n", what_sym[what], MyText_AS_STRING(ascii), frame->f_lineno);
+ Py_DECREF(ascii);
}
#endif
#if TRACE_LOG
- if (strstr(MyText_AS_STRING(frame->f_code->co_filename), start_file) && frame->f_lineno == start_line) {
+ ascii = MyText_AS_BYTES(frame->f_code->co_filename);
+ if (strstr(MyText_AS_STRING(ascii), start_file) && frame->f_lineno == start_line) {
logging = 1;
}
+ Py_DECREF(ascii);
#endif
/* See below for details on missing-return detection. */
@@ -508,7 +517,10 @@ CTracer_call(CTracer *self, PyObject *args, PyObject *kwds)
/* In Python, the what argument is a string, we need to find an int
for the C function. */
for (what = 0; what_names[what]; what++) {
- if (!strcmp(MyText_AS_STRING(what_str), what_names[what])) {
+ PyObject *ascii = MyText_AS_BYTES(what_str);
+ int should_break = !strcmp(MyText_AS_STRING(ascii), what_names[what]);
+ Py_DECREF(ascii);
+ if (should_break) {
break;
}
}
@@ -716,4 +728,3 @@ inittracer(void)
}
#endif /* Py3k */
-
diff --git a/coverage/version.py b/coverage/version.py
new file mode 100644
index 00000000..db4bca5d
--- /dev/null
+++ b/coverage/version.py
@@ -0,0 +1,9 @@
+"""The version and URL for coverage.py"""
+# This file is exec'ed in setup.py, don't import anything!
+
+__version__ = "3.6.1a1" # see detailed history in CHANGES.txt
+
+__url__ = "http://nedbatchelder.com/code/coverage"
+if max(__version__).isalpha():
+ # For pre-releases, use a version-specific URL.
+ __url__ += "/" + __version__
diff --git a/coverage/xmlreport.py b/coverage/xmlreport.py
index a65d5a6d..301bc865 100644
--- a/coverage/xmlreport.py
+++ b/coverage/xmlreport.py
@@ -4,7 +4,7 @@ import os, sys, time
import xml.dom.minidom
from coverage import __url__, __version__
-from coverage.backward import sorted # pylint: disable=W0622
+from coverage.backward import sorted, rpartition # pylint: disable=W0622
from coverage.report import Reporter
def rate(hit, num):
@@ -84,14 +84,18 @@ class XmlReporter(Reporter):
# Use the DOM to write the output file.
outfile.write(self.xml_out.toprettyxml())
+ # Return the total percentage.
+ return 100.0 * (lhits_tot + bhits_tot) / (lnum_tot + bnum_tot)
+
def xml_file(self, cu, analysis):
"""Add to the XML report for a single file."""
# Create the 'lines' and 'package' XML elements, which
# are populated later. Note that a package == a directory.
- dirname, fname = os.path.split(cu.name)
- dirname = dirname or '.'
- package = self.packages.setdefault(dirname, [ {}, 0, 0, 0, 0 ])
+ package_name = rpartition(cu.name, ".")[0]
+ className = cu.name
+
+ package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0])
xclass = self.xml_out.createElement("class")
@@ -99,10 +103,10 @@ class XmlReporter(Reporter):
xlines = self.xml_out.createElement("lines")
xclass.appendChild(xlines)
- className = fname.replace('.', '_')
+
xclass.setAttribute("name", className)
- ext = os.path.splitext(cu.filename)[1]
- xclass.setAttribute("filename", cu.name + ext)
+ filename = cu.file_locator.relative_filename(cu.filename)
+ xclass.setAttribute("filename", filename.replace("\\", "/"))
xclass.setAttribute("complexity", "0")
branch_stats = analysis.branch_stats()
@@ -114,7 +118,7 @@ class XmlReporter(Reporter):
# Q: can we get info about the number of times a statement is
# executed? If so, that should be recorded here.
- xline.setAttribute("hits", str(int(not line in analysis.missing)))
+ xline.setAttribute("hits", str(int(line not in analysis.missing)))
if self.arcs:
if line in branch_stats: