author | Christian Hammond <christian@beanbaginc.com> | 2016-11-04 16:57:38 -0700
---|---|---
committer | Christian Hammond <christian@beanbaginc.com> | 2016-11-04 16:57:38 -0700
commit | 6ded9db39463372e5205a36bea72d6de516ece69 (patch) |
tree | 1d1f497cc99dd44d2ee7e2c3daa35965157ff924 /tests/test_perllexer.py |
download | pygments-git-6ded9db39463372e5205a36bea72d6de516ece69.tar.gz |
Add support for partials and path segments for Handlebars.
This introduces support for two missing features in the Handlebars lexer:
partials and path segments. Partials mostly appeared to work before, but the
`>` in `{{> ... }}` would be shown as a syntax error, as would other
components of the partial. This change introduces support for the following
forms (a quick lexer check is sketched after the list):
* Standard partials: `{{> partialName}}`
* Partials with parameters: `{{> partialName varname="value"}}`
* Dynamic partials: `{{> (partialFunc)}}`
* Dynamic partials with lookups: `{{> (lookup ../path "partialName")}}`
* Partial blocks: `{{> @partial-block}}`
* Inline partials: `{{#*inline}}..{{/inline}}`
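As a quick, hypothetical check of the above (my own sketch, not part of this commit or its test suite), each of these partial forms can be run through the updated lexer and confirmed to produce no `Error` tokens:

```python
# Hypothetical sanity check: none of the partial forms above should lex
# to an Error token once this change is in place. Sample inputs are
# illustrative, not taken from this commit.
from pygments.lexers.templates import HandlebarsLexer
from pygments.token import Error

lexer = HandlebarsLexer()
partials = [
    '{{> partialName}}',
    '{{> partialName varname="value"}}',
    '{{> (partialFunc)}}',
    '{{> (lookup ../path "partialName")}}',
    '{{> @partial-block}}',
]
for source in partials:
    tokens = list(lexer.get_tokens(source))
    assert not any(ttype is Error for ttype, _ in tokens), (source, tokens)
```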
It also introduces support for path segments, which can reference content in
the current context or in a parent context: for instance, `this.name`,
`this/name`, `./name`, and `../name`. These are all now tracked as variables.
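"Tracked as variables" can be observed the same way: the components of a path segment should now come back under the `Name` token hierarchy rather than as errors. Again a hedged sketch with assumed sample inputs, not code from the commit:

```python
# Hypothetical check that path-segment components lex under Token.Name;
# the expressions below are illustrative samples.
from pygments.lexers.templates import HandlebarsLexer
from pygments.token import Error, Name

lexer = HandlebarsLexer()
for expr in ('{{this.name}}', '{{this/name}}', '{{./name}}', '{{../name}}'):
    tokens = list(lexer.get_tokens(expr))
    assert not any(ttype is Error for ttype, _ in tokens), (expr, tokens)
    # At least part of each expression should be a Name (variable) token.
    assert any(ttype in Name for ttype, _ in tokens), (expr, tokens)
```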
Diffstat (limited to 'tests/test_perllexer.py')
-rw-r--r-- | tests/test_perllexer.py | 137 |
1 file changed, 137 insertions, 0 deletions
```diff
diff --git a/tests/test_perllexer.py b/tests/test_perllexer.py
new file mode 100644
index 00000000..26b2d0a7
--- /dev/null
+++ b/tests/test_perllexer.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+"""
+    Pygments regex lexer tests
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import time
+import unittest
+
+from pygments.token import String
+from pygments.lexers.perl import PerlLexer
+
+
+class RunawayRegexTest(unittest.TestCase):
+    # A previous version of the Perl lexer would spend a great deal of
+    # time backtracking when given particular strings. These tests show that
+    # the runaway backtracking doesn't happen any more (at least for the given
+    # cases).
+
+    lexer = PerlLexer()
+
+    ### Test helpers.
+
+    def assert_single_token(self, s, token):
+        """Show that a given string generates only one token."""
+        tokens = list(self.lexer.get_tokens_unprocessed(s))
+        self.assertEqual(len(tokens), 1, tokens)
+        self.assertEqual(s, tokens[0][2])
+        self.assertEqual(token, tokens[0][1])
+
+    def assert_tokens(self, strings, expected_tokens):
+        """Show that a given string generates the expected tokens."""
+        tokens = list(self.lexer.get_tokens_unprocessed(''.join(strings)))
+        self.assertEqual(len(tokens), len(expected_tokens), tokens)
+        for index, s in enumerate(strings):
+            self.assertEqual(s, tokens[index][2])
+            self.assertEqual(expected_tokens[index], tokens[index][1])
+
+    def assert_fast_tokenization(self, s):
+        """Show that a given string is tokenized quickly."""
+        start = time.time()
+        tokens = list(self.lexer.get_tokens_unprocessed(s))
+        end = time.time()
+        # Isn't 10 seconds kind of a long time? Yes, but we don't want false
+        # positives when the tests are starved for CPU time.
+        if end - start > 10:
+            self.fail('tokenization took too long')
+        return tokens
+
+    ### Strings.
+
+    def test_single_quote_strings(self):
+        self.assert_single_token(r"'foo\tbar\\\'baz'", String)
+        self.assert_fast_tokenization("'" + '\\'*999)
+
+    def test_double_quote_strings(self):
+        self.assert_single_token(r'"foo\tbar\\\"baz"', String)
+        self.assert_fast_tokenization('"' + '\\'*999)
+
+    def test_backtick_strings(self):
+        self.assert_single_token(r'`foo\tbar\\\`baz`', String.Backtick)
+        self.assert_fast_tokenization('`' + '\\'*999)
+
+    ### Regex matches with various delimiters.
+
+    def test_match(self):
+        self.assert_single_token(r'/aa\tbb/', String.Regex)
+        self.assert_fast_tokenization('/' + '\\'*999)
+
+    def test_match_with_slash(self):
+        self.assert_tokens(['m', '/\n\\t\\\\/'], [String.Regex, String.Regex])
+        self.assert_fast_tokenization('m/xxx\n' + '\\'*999)
+
+    def test_match_with_bang(self):
+        self.assert_tokens(['m', r'!aa\t\!bb!'], [String.Regex, String.Regex])
+        self.assert_fast_tokenization('m!' + '\\'*999)
+
+    def test_match_with_brace(self):
+        self.assert_tokens(['m', r'{aa\t\}bb}'], [String.Regex, String.Regex])
+        self.assert_fast_tokenization('m{' + '\\'*999)
+
+    def test_match_with_angle_brackets(self):
+        self.assert_tokens(['m', r'<aa\t\>bb>'], [String.Regex, String.Regex])
+        self.assert_fast_tokenization('m<' + '\\'*999)
+
+    def test_match_with_parenthesis(self):
+        self.assert_tokens(['m', r'(aa\t\)bb)'], [String.Regex, String.Regex])
+        self.assert_fast_tokenization('m(' + '\\'*999)
+
+    def test_match_with_at_sign(self):
+        self.assert_tokens(['m', r'@aa\t\@bb@'], [String.Regex, String.Regex])
+        self.assert_fast_tokenization('m@' + '\\'*999)
+
+    def test_match_with_percent_sign(self):
+        self.assert_tokens(['m', r'%aa\t\%bb%'], [String.Regex, String.Regex])
+        self.assert_fast_tokenization('m%' + '\\'*999)
+
+    def test_match_with_dollar_sign(self):
+        self.assert_tokens(['m', r'$aa\t\$bb$'], [String.Regex, String.Regex])
+        self.assert_fast_tokenization('m$' + '\\'*999)
+
+    ### Regex substitutions with various delimiters.
+
+    def test_substitution_with_slash(self):
+        self.assert_single_token('s/aaa/bbb/g', String.Regex)
+        self.assert_fast_tokenization('s/foo/' + '\\'*999)
+
+    def test_substitution_with_at_sign(self):
+        self.assert_single_token(r's@aaa@bbb@g', String.Regex)
+        self.assert_fast_tokenization('s@foo@' + '\\'*999)
+
+    def test_substitution_with_percent_sign(self):
+        self.assert_single_token(r's%aaa%bbb%g', String.Regex)
+        self.assert_fast_tokenization('s%foo%' + '\\'*999)
+
+    def test_substitution_with_brace(self):
+        self.assert_single_token(r's{aaa}', String.Regex)
+        self.assert_fast_tokenization('s{' + '\\'*999)
+
+    def test_substitution_with_angle_bracket(self):
+        self.assert_single_token(r's<aaa>', String.Regex)
+        self.assert_fast_tokenization('s<' + '\\'*999)
+
+    def test_substitution_with_angle_bracket(self):
+        self.assert_single_token(r's<aaa>', String.Regex)
+        self.assert_fast_tokenization('s<' + '\\'*999)
+
+    def test_substitution_with_square_bracket(self):
+        self.assert_single_token(r's[aaa]', String.Regex)
+        self.assert_fast_tokenization('s[' + '\\'*999)
+
+    def test_substitution_with_parenthesis(self):
+        self.assert_single_token(r's(aaa)', String.Regex)
+        self.assert_fast_tokenization('s(' + '\\'*999)
```
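A note on the `'\\'*999` inputs that appear throughout these tests: against a naively written string rule, an unterminated run of backslashes causes catastrophic backtracking, because the regex engine tries every way of splitting the run between the "escaped character" and "plain character" alternatives. The sketch below demonstrates the failure mode with a deliberately naive pattern of my own; it is not the Perl lexer's actual rule:

```python
# Illustration only: a deliberately naive single-quoted-string pattern,
# NOT the rule the Perl lexer actually uses. Both alternatives can
# consume a backslash, so on an unterminated run of n backslashes the
# engine tries roughly Fibonacci(n) ways to partition the run before
# concluding there is no match.
import re
import time

naive_single_quoted = re.compile(r"'(\\.|[^'])*'")

for n in (18, 22, 26):
    s = "'" + '\\' * n  # unterminated: the closing ' never matches
    start = time.time()
    naive_single_quoted.match(s)  # returns None, but only after backtracking
    print('n=%d: %.3fs' % (n, time.time() - start))
```

Runtime grows by roughly the golden ratio per added backslash, which is why `assert_fast_tokenization` caps tokenization of the 999-backslash inputs at a generous 10 seconds: a non-backtracking rule finishes in linear time.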