author    Ned Batchelder <ned@nedbatchelder.com>  2013-10-26 22:08:47 -0400
committer Ned Batchelder <ned@nedbatchelder.com>  2013-10-26 22:08:47 -0400
commit    bde6d2060bebcf7c8a3a365f9b9c01a9d801dbe9 (patch)
tree      1c68a60b747085dd3e535f4f97709fe45c3c3aad /coverage/parser.py
parent    9deabd31fe51f8bf7dad45c342478878e703605a (diff)
download  python-coveragepy-bde6d2060bebcf7c8a3a365f9b9c01a9d801dbe9.tar.gz
Cache generate_tokens to speed HTML reports.
Diffstat (limited to 'coverage/parser.py')
-rw-r--r--  coverage/parser.py | 30 +++++++++++++++++++++++++++++-
1 file changed, 29 insertions(+), 1 deletion(-)
diff --git a/coverage/parser.py b/coverage/parser.py
index 7459eef..ed8f379 100644
--- a/coverage/parser.py
+++ b/coverage/parser.py
@@ -108,7 +108,7 @@ class CodeParser(object):
         first_line = None
         empty = True

-        tokgen = tokenize.generate_tokens(StringIO(self.text).readline)
+        tokgen = generate_tokens(self.text)
         for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
             if self.show_tokens:                # pragma: not covered
                 print("%10s %5s %-20r %r" % (
@@ -669,3 +669,31 @@ class Chunk(object):
return "<%d+%d @%d%s %r>" % (
self.byte, self.length, self.line, bang, list(self.exits)
)
+
+
+class CachedTokenizer(object):
+    """A one-element cache around tokenize.generate_tokens.
+
+    When reporting, coverage.py tokenizes files twice, once to find the
+    structure of the file, and once to syntax-color it.  Tokenizing is
+    expensive, and easily cached.
+
+    This is a one-element cache so that our twice-in-a-row tokenizing doesn't
+    actually tokenize twice.
+
+    """
+    def __init__(self):
+        self.last_text = None
+        self.last_tokens = None
+
+    def generate_tokens(self, text):
+        """A stand-in for `tokenize.generate_tokens`."""
+        if text != self.last_text:
+            self.last_text = text
+            self.last_tokens = list(
+                tokenize.generate_tokens(StringIO(text).readline)
+            )
+        return self.last_tokens
+
+# Create our generate_tokens cache as a callable replacement function.
+generate_tokens = CachedTokenizer().generate_tokens
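
For illustration, here is a minimal standalone sketch of the same one-element caching idea, runnable outside coverage.py. It is a demo, not part of the patch: it imports StringIO from io directly (the patched module gets StringIO through coverage's compatibility layer), and the demo source strings are made up.

import tokenize
from io import StringIO  # assumption: plain io.StringIO, not coverage's compat shim

class CachedTokenizer(object):
    """One-element cache: re-tokenize only when the text changes."""
    def __init__(self):
        self.last_text = None
        self.last_tokens = None

    def generate_tokens(self, text):
        if text != self.last_text:
            self.last_text = text
            self.last_tokens = list(
                tokenize.generate_tokens(StringIO(text).readline)
            )
        return self.last_tokens

generate_tokens = CachedTokenizer().generate_tokens

# Hypothetical demo inputs; any Python source text works.
source = "def f():\n    return 42\n"
first = generate_tokens(source)      # tokenizes and caches
second = generate_tokens(source)     # identical text: cache hit
assert first is second               # the very same list comes back
other = generate_tokens("x = 1\n")   # new text evicts the old entry
assert other is not first

Note that the tokens are materialized with list() before being cached: a raw generator could only be consumed once, so the second, cache-hit call would otherwise receive an exhausted iterator.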