-rw-r--r--  coverage/backward.py    | 18
-rw-r--r--  coverage/execfile.py    |  9
-rw-r--r--  coverage/files.py       | 11
-rw-r--r--  coverage/parser.py      | 41
-rw-r--r--  coverage/phystokens.py  | 53
5 files changed, 76 insertions(+), 56 deletions(-)
diff --git a/coverage/backward.py b/coverage/backward.py
index 030d336e..f9402f41 100644
--- a/coverage/backward.py
+++ b/coverage/backward.py
@@ -5,7 +5,9 @@
# pylint: disable=unused-import
# pylint: disable=no-name-in-module
-import os, re, sys
+import os
+import re
+import sys
# Pythons 2 and 3 differ on where to get StringIO.
try:
@@ -50,17 +52,6 @@ else:
"""Produce the items from dict `d`."""
return d.iteritems()
-# Reading Python source and interpreting the coding comment is a big deal.
-if sys.version_info >= (3, 0):
- # Python 3.2 provides `tokenize.open`, the best way to open source files.
- import tokenize
- open_python_source = tokenize.open
-else:
- def open_python_source(fname):
- """Open a source file the best way."""
- return open(fname, "rU")
-
-
# Python 3.x is picky about bytes and strings, so provide methods to
# get them right, and make them no-ops in 2.x
if sys.version_info >= (3, 0):
@@ -118,7 +109,8 @@ except KeyError:
# imp was deprecated in Python 3.3
try:
- import importlib, importlib.util
+ import importlib
+ import importlib.util
imp = None
except ImportError:
importlib = None
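
The comment kept in this hunk about Python 3.x being picky with bytes and strings refers to the small compatibility shims that stay in backward.py. A minimal sketch of that idea, using a hypothetical helper name (`to_bytes`) that is not taken from this diff:

    import sys

    if sys.version_info >= (3, 0):
        def to_bytes(s):
            """Convert a text string to UTF-8 bytes on Python 3."""
            return s.encode('utf8')
    else:
        def to_bytes(s):
            """On Python 2, str is already a byte string, so pass it through."""
            return s
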
diff --git a/coverage/execfile.py b/coverage/execfile.py
index 299fea9b..246d79a5 100644
--- a/coverage/execfile.py
+++ b/coverage/execfile.py
@@ -1,10 +1,14 @@
"""Execute files of Python code."""
-import marshal, os, sys, types
+import marshal
+import os
+import sys
+import types
-from coverage.backward import open_python_source, BUILTINS
+from coverage.backward import BUILTINS
from coverage.backward import PYC_MAGIC_NUMBER, imp, importlib_util_find_spec
from coverage.misc import ExceptionDuringRun, NoCode, NoSource
+from coverage.phystokens import open_python_source
class DummyLoader(object):
@@ -117,7 +121,6 @@ def run_python_file(filename, args, package=None, modulename=None):
if modulename is None and sys.version_info >= (3, 3):
modulename = '__main__'
-
# Create a module to serve as __main__
old_main_mod = sys.modules['__main__']
main_mod = types.ModuleType('__main__')
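
run_python_file above installs a fresh module object as __main__ before executing the target file. A rough, simplified sketch of that pattern, assuming a placeholder script name "script.py" and omitting the attributes coverage.py actually sets on the module (__file__, __builtins__, and so on):

    import sys
    import types

    old_main_mod = sys.modules['__main__']      # remember the real __main__
    main_mod = types.ModuleType('__main__')     # fresh module to run the file in
    sys.modules['__main__'] = main_mod
    try:
        with open("script.py") as f:            # "script.py" is a placeholder
            code = compile(f.read(), "script.py", "exec")
        exec(code, main_mod.__dict__)
    finally:
        sys.modules['__main__'] = old_main_mod  # restore the original __main__
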
diff --git a/coverage/files.py b/coverage/files.py
index c2d7153d..6ba7983b 100644
--- a/coverage/files.py
+++ b/coverage/files.py
@@ -1,10 +1,15 @@
"""File wrangling."""
-import fnmatch, os, os.path, re, sys
-import ntpath, posixpath
+import fnmatch
+import os
+import os.path
+import re
+import sys
+import ntpath
+import posixpath
-from coverage.backward import open_python_source
from coverage.misc import CoverageException, join_regex
+from coverage.phystokens import open_python_source
class FileLocator(object):
diff --git a/coverage/parser.py b/coverage/parser.py
index 97cc01bb..317f7ec7 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -1,6 +1,10 @@
"""Code parsing for Coverage."""
-import collections, dis, re, token, tokenize
+import collections
+import dis
+import re
+import token
+import tokenize
from coverage.backward import range # pylint: disable=redefined-builtin
from coverage.backward import bytes_to_ints
@@ -8,6 +12,7 @@ from coverage.bytecode import ByteCodes, CodeObjects
from coverage.files import get_python_source
from coverage.misc import nice_pair, expensive, join_regex
from coverage.misc import CoverageException, NoSource, NotPython
+from coverage.phystokens import generate_tokens
class CodeParser(object):
@@ -287,6 +292,7 @@ def _opcode(name):
"""Return the opcode by name from the dis module."""
return dis.opmap[name]
+
def _opcode_set(*names):
"""Return a set of opcodes by the names in `names`."""
s = set()
@@ -671,36 +677,3 @@ class Chunk(object):
return "<%d+%d @%d%s %r>" % (
self.byte, self.length, self.line, bang, list(self.exits)
)
-
-
-class CachedTokenizer(object):
- """A one-element cache around tokenize.generate_tokens.
-
- When reporting, coverage.py tokenizes files twice, once to find the
- structure of the file, and once to syntax-color it. Tokenizing is
- expensive, and easily cached.
-
- This is a one-element cache so that our twice-in-a-row tokenizing doesn't
- actually tokenize twice.
-
- """
- def __init__(self):
- self.last_text = None
- self.last_tokens = None
-
- def generate_tokens(self, text):
- """A stand-in for `tokenize.generate_tokens`."""
- # Check the type first so we don't compare bytes to unicode and get
- # warnings.
- if type(text) != type(self.last_text) or text != self.last_text:
- self.last_text = text
- line_iter = iter(text.splitlines(True))
- try:
- readline = line_iter.next
- except AttributeError:
- readline = line_iter.__next__
- self.last_tokens = list(tokenize.generate_tokens(readline))
- return self.last_tokens
-
-# Create our generate_tokens cache as a callable replacement function.
-generate_tokens = CachedTokenizer().generate_tokens
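
The CachedTokenizer removed from parser.py here is re-added unchanged to phystokens.py below. A minimal usage sketch of the cached generate_tokens replacement, assuming the post-commit module layout and a placeholder source string:

    from coverage.phystokens import generate_tokens

    text = "x = 1\nprint(x)\n"        # placeholder source text

    # The first call tokenizes with tokenize.generate_tokens.
    first = generate_tokens(text)
    # A second call with the same text returns the cached token list
    # without tokenizing again.
    second = generate_tokens(text)
    assert first is second
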
diff --git a/coverage/phystokens.py b/coverage/phystokens.py
index 3fd1165c..4faa3c3f 100644
--- a/coverage/phystokens.py
+++ b/coverage/phystokens.py
@@ -1,8 +1,11 @@
"""Better tokenizing for coverage.py."""
-import codecs, keyword, re, sys, token, tokenize
-
-from coverage.parser import generate_tokens
+import codecs
+import keyword
+import re
+import sys
+import token
+import tokenize
def phys_tokens(toks):
@@ -111,6 +114,39 @@ def source_token_lines(source):
yield line
+class CachedTokenizer(object):
+ """A one-element cache around tokenize.generate_tokens.
+
+ When reporting, coverage.py tokenizes files twice, once to find the
+ structure of the file, and once to syntax-color it. Tokenizing is
+ expensive, and easily cached.
+
+ This is a one-element cache so that our twice-in-a-row tokenizing doesn't
+ actually tokenize twice.
+
+ """
+ def __init__(self):
+ self.last_text = None
+ self.last_tokens = None
+
+ def generate_tokens(self, text):
+ """A stand-in for `tokenize.generate_tokens`."""
+ # Check the type first so we don't compare bytes to unicode and get
+ # warnings.
+ if type(text) != type(self.last_text) or text != self.last_text:
+ self.last_text = text
+ line_iter = iter(text.splitlines(True))
+ try:
+ readline = line_iter.next
+ except AttributeError:
+ readline = line_iter.__next__
+ self.last_tokens = list(tokenize.generate_tokens(readline))
+ return self.last_tokens
+
+# Create our generate_tokens cache as a callable replacement function.
+generate_tokens = CachedTokenizer().generate_tokens
+
+
def source_encoding(source):
"""Determine the encoding for `source` (a string), according to PEP 263.
@@ -205,3 +241,14 @@ def source_encoding(source):
return encoding
return default
+
+
+# Reading Python source and interpreting the coding comment is a big deal.
+if sys.version_info >= (3, 0):
+ # Python 3.2 provides `tokenize.open`, the best way to open source files.
+ import tokenize
+ open_python_source = tokenize.open
+else:
+ def open_python_source(fname):
+ """Open a source file the best way."""
+ return open(fname, "rU")
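
For reference, on Python 3 the relocated open_python_source delegates to tokenize.open, which reads the PEP 263 coding comment before decoding the file. A minimal sketch, with "example.py" as a placeholder file name:

    import tokenize

    with tokenize.open("example.py") as f:   # "example.py" is a placeholder
        source = f.read()                    # text decoded with the declared encoding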