author | Tim Hatch <tim@timhatch.com> | 2014-04-23 13:06:38 -0400
---|---|---
committer | Tim Hatch <tim@timhatch.com> | 2014-04-23 13:06:38 -0400
commit | eed55ef38fbe44a5223927c6c1bc3048653623d6 (patch) |
tree | 3e461d2cdc99970b2130e0426dd883884fbb86b9 /pygments/formatters/other.py |
parent | 6747ef30d28dbc52edf019e47f6edb7fb773b3db (diff) |
download | pygments-eed55ef38fbe44a5223927c6c1bc3048653623d6.tar.gz |
Add testcase formatter
Usage: echo 'print "hi";' | pygmentize -l python -f testcase
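For that invocation the formatter emits a ready-to-paste unittest method built from the TESTCASE_BEFORE/TESTCASE_AFTER templates added below. The following is only a sketch of that output; the exact token types and values depend on the lexer in use, so treat them as illustrative:

    def testNeedsName(self):
        fragment = u'print "hi";\n'
        expected = [
            (Token.Keyword, u'print'),
            (Token.Text, u' '),
            (Token.Literal.String, u'"'),
            (Token.Literal.String, u'hi'),
            (Token.Literal.String, u'"'),
            (Token.Punctuation, u';'),
            (Token.Text, u'\n'),
        ]
        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))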
Diffstat (limited to 'pygments/formatters/other.py')
-rw-r--r-- | pygments/formatters/other.py | 48 |
1 file changed, 47 insertions, 1 deletion
diff --git a/pygments/formatters/other.py b/pygments/formatters/other.py
index 7368a642..b6e4bc58 100644
--- a/pygments/formatters/other.py
+++ b/pygments/formatters/other.py
@@ -14,7 +14,7 @@ from pygments.util import OptionError, get_choice_opt
 from pygments.token import Token
 from pygments.console import colorize
 
-__all__ = ['NullFormatter', 'RawTokenFormatter']
+__all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
 
 
 class NullFormatter(Formatter):
@@ -114,3 +114,49 @@ class RawTokenFormatter(Formatter):
             for ttype, value in tokensource:
                 write("%s\t%r\n" % (ttype, value))
         flush()
+
+TESTCASE_BEFORE = u'''\
+    def testNeedsName(self):
+        fragment = %r
+        expected = [
+'''
+TESTCASE_AFTER = u'''\
+        ]
+        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
+'''
+
+
+class TestcaseFormatter(Formatter):
+    """
+    Format tokens as appropriate for a new testcase.
+
+    .. versionadded:: 2.0
+    """
+    name = 'Testcase'
+    aliases = ['testcase']
+
+    def __init__(self, **options):
+        Formatter.__init__(self, **options)
+        #if self.encoding != 'utf-8':
+        #    print >>sys.stderr, "NOTICE: Forcing encoding to utf-8, as all Pygments source is"
+        if self.encoding is not None and self.encoding != 'utf-8':
+            raise ValueError("Only None and utf-8 are allowed encodings.")
+
+    def format(self, tokensource, outfile):
+        indentation = ' ' * 12
+        rawbuf = []
+        outbuf = []
+        for ttype, value in tokensource:
+            rawbuf.append(value)
+            outbuf.append('%s(%s, %r),\n' % (indentation, ttype, value))
+
+        before = TESTCASE_BEFORE % (u''.join(rawbuf),)
+        during = u''.join(outbuf)
+        after = TESTCASE_AFTER
+        if self.encoding is None:
+            outfile.write(before + during + after)
+        else:
+            outfile.write(before.encode('utf-8'))
+            outfile.write(during.encode('utf-8'))
+            outfile.write(after.encode('utf-8'))
+        outfile.flush()
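The new formatter can also be driven through the regular Pygments API rather than the pygmentize command line. The snippet below is a minimal sketch, not part of this commit; it imports TestcaseFormatter directly from pygments.formatters.other because the diff above does not touch the formatter name mapping:

    # Minimal sketch (not in this commit): use the new formatter via the API.
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters.other import TestcaseFormatter

    code = 'print "hi";'

    # encoding=None keeps the output as text; per __init__ above, only
    # None and 'utf-8' are accepted.
    print(highlight(code, PythonLexer(), TestcaseFormatter(encoding=None)))

This prints the same generated test method that the pygmentize pipeline in the commit message produces.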