diff options
-rw-r--r-- | pygments/filters/__init__.py | 3 | ++-
-rw-r--r-- | pygments/lexer.py | 2 | +-
-rw-r--r-- | tests/test_basic_api.py | 13 | ++++++++++++-
-rw-r--r-- | tests/test_cmdline.py | 17 | +++++++++++++++--
4 files changed, 30 insertions, 5 deletions
diff --git a/pygments/filters/__init__.py b/pygments/filters/__init__.py
index ccb78652..375a638e 100644
--- a/pygments/filters/__init__.py
+++ b/pygments/filters/__init__.py
@@ -127,12 +127,13 @@ class NameHighlightFilter(Filter):
             )
 
     This would highlight the names "foo", "bar" and "baz"
-    as functions. `Name.Function` is the token default.
+    as functions. `Name.Function` is the default token type.
     """
 
     def __init__(self, **options):
         Filter.__init__(self, **options)
         self.names = set(get_list_opt(options, 'names', []))
+        print "!!!!!!", self.names
         tokentype = options.get('tokentype')
         if tokentype:
             self.tokentype = string_to_tokentype(tokentype)
diff --git a/pygments/lexer.py b/pygments/lexer.py
index 25d36e08..709587d7 100644
--- a/pygments/lexer.py
+++ b/pygments/lexer.py
@@ -101,7 +101,7 @@ class Lexer(object):
         Add a new stream filter to this lexer.
         """
         if not isinstance(filter_, Filter):
-            filter = get_filter_by_name(filter_, **options)
+            filter_ = get_filter_by_name(filter_, **options)
         self.filters.append(filter_)
 
     def analyse_text(text):
diff --git a/tests/test_basic_api.py b/tests/test_basic_api.py
index a5abd2b8..f07c00e7 100644
--- a/tests/test_basic_api.py
+++ b/tests/test_basic_api.py
@@ -7,11 +7,12 @@
     :license: BSD, see LICENSE for more details.
 """
 
+import os
 import unittest
 import StringIO
 import random
 
-from pygments import lexers, formatters, format
+from pygments import lexers, formatters, filters, format
 from pygments.token import _TokenType, Text
 from pygments.lexer import RegexLexer
 
@@ -70,6 +71,16 @@ class LexersTest(unittest.TestCase):
         a(isinstance(x, lexers.PythonLexer))
         ae(x.options["opt"], "val")
 
+    def test_filters(self):
+        for x in filters.FILTERS.keys():
+            lx = lexers.PythonLexer()
+            lx.add_filter(x)
+            text = file(os.path.join(testdir, testfile)).read().decode('utf-8')
+            tokens = list(lx.get_tokens(text))
+            roundtext = ''.join([t[1] for t in tokens])
+            self.assertEquals(roundtext, text,
+                              "lexer roundtrip with %s filter failed" % x)
+
 
 class FormattersTest(unittest.TestCase):
 
diff --git a/tests/test_cmdline.py b/tests/test_cmdline.py
index 9a068d53..e82eb247 100644
--- a/tests/test_cmdline.py
+++ b/tests/test_cmdline.py
@@ -33,8 +33,14 @@ class CmdLineTest(unittest.TestCase):
 
     def test_L_opt(self):
         c, o, e = run_cmdline("-L")
-        self.assert_(c == 0)
-        self.assert_(o.find("Lexers") and o.find("Formatters"))
+        self.assertEquals(c, 0)
+        self.assert_("Lexers" in o and "Formatters" in o and
+                     "Filters" in o and "Styles" in o)
+        c, o, e = run_cmdline("-L", "lexer")
+        self.assertEquals(c, 0)
+        self.assert_("Lexers" in o and not "Formatters" in o)
+        c, o, e = run_cmdline("-L", "lexers")
+        self.assertEquals(c, 0)
 
     def test_O_opt(self):
         filename = os.path.join(testdir, testfile)
@@ -43,6 +49,13 @@ class CmdLineTest(unittest.TestCase):
         self.assert_("<html" in o)
         self.assert_('class="linenos"' in o)
 
+    def test_F_opt(self):
+        filename = os.path.join(testdir, testfile)
+        c, o, e = run_cmdline("-Fhighlight:tokentype=Name.Blubb,names=testfile testdir",
+                              "-fhtml", filename)
+        self.assertEquals(c, 0)
+        self.assert_('<span class="n-Blubb' in o)
+
     def test_invalid_opts(self):
         for opts in [("-L", "-lpy"), ("-L", "-fhtml"), ("-L", "-Ox"),
                      ("-a",), ("-Sst", "-lpy")]: