From 4d27daa332b301a8cfcf0cac6345d56bfc17e5fe Mon Sep 17 00:00:00 2001 From: Georg Brandl Date: Mon, 6 May 2019 18:02:47 +0200 Subject: Remove unittest classes from the test suite. --- CHANGES | 2 + tests/test_basic.py | 123 +++--- tests/test_basic_api.py | 26 +- tests/test_bibtex.py | 446 ++++++++++---------- tests/test_cfm.py | 65 +-- tests/test_clexer.py | 465 ++++++++++----------- tests/test_cmdline.py | 495 ++++++++++++----------- tests/test_cpp.py | 40 +- tests/test_crystal.py | 576 +++++++++++++------------- tests/test_csound.py | 851 ++++++++++++++++++++------------------- tests/test_data.py | 164 ++++---- tests/test_ezhil.py | 318 +++++++-------- tests/test_html_formatter.py | 338 ++++++++-------- tests/test_inherit.py | 64 +-- tests/test_irc_formatter.py | 15 +- tests/test_java.py | 124 +++--- tests/test_javascript.py | 73 ++-- tests/test_julia.py | 91 ++--- tests/test_kotlin.py | 237 +++++------ tests/test_latex_formatter.py | 63 ++- tests/test_lexers_other.py | 108 +++-- tests/test_markdown_lexer.py | 39 +- tests/test_objectiveclexer.py | 124 +++--- tests/test_perllexer.py | 250 +++++++----- tests/test_php.py | 45 ++- tests/test_praat.py | 239 +++++------ tests/test_properties.py | 159 ++++---- tests/test_python.py | 228 +++++------ tests/test_qbasiclexer.py | 54 +-- tests/test_r.py | 111 ++--- tests/test_regexlexer.py | 68 ++-- tests/test_regexopt.py | 183 ++++----- tests/test_rtf_formatter.py | 192 +++++---- tests/test_ruby.py | 268 ++++++------ tests/test_shell.py | 261 ++++++------ tests/test_smarty.py | 50 +-- tests/test_sql.py | 203 +++++----- tests/test_terminal_formatter.py | 86 ++-- tests/test_textfmts.py | 52 ++- tests/test_token.py | 64 ++- tests/test_unistring.py | 65 ++- tests/test_using_api.py | 21 +- tests/test_util.py | 395 +++++++++--------- tests/test_whiley.py | 33 +- tox.ini | 9 +- 45 files changed, 3994 insertions(+), 3889 deletions(-) diff --git a/CHANGES b/CHANGES index db1fb485..9e591824 100644 --- a/CHANGES +++ b/CHANGES @@ -50,6 +50,8 @@ Version 2.4.0 - Fix rare unicode errors on Python 2.7 (PR#798, #1492) - TypoScript uses ``.typoscript`` now (#1498) - Updated Trove classifiers and ``pip`` requirements (PR#799) +- Test suite switched to py.test, removed nose dependency (#1490) + Version 2.3.1 ------------- diff --git a/tests/test_basic.py b/tests/test_basic.py index 03d10cd2..e2255f5b 100644 --- a/tests/test_basic.py +++ b/tests/test_basic.py @@ -3,72 +3,71 @@ Pygments Basic lexers tests ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. 
""" -import unittest + +import pytest from pygments.lexers.basic import VBScriptLexer from pygments.token import Error, Name, Number, Punctuation, String, Whitespace -class VBScriptLexerTest(unittest.TestCase): - - def setUp(self): - self.lexer = VBScriptLexer() - - def _assert_are_tokens_of_type(self, examples, expected_token_type): - for test_number, example in enumerate(examples.split(), 1): - token_count = 0 - for token_type, token_value in self.lexer.get_tokens(example): - if token_type != Whitespace: - token_count += 1 - self.assertEqual( - token_type, expected_token_type, - 'token_type #%d for %s is be %s but must be %s' % - (test_number, token_value, token_type, expected_token_type)) - self.assertEqual( - token_count, 1, - '%s must yield exactly 1 token instead of %d' % - (example, token_count)) - - def _assert_tokens_match(self, text, expected_tokens_without_trailing_newline): - actual_tokens = tuple(self.lexer.get_tokens(text)) - if (len(actual_tokens) >= 1) and (actual_tokens[-1] == (Whitespace, '\n')): - actual_tokens = tuple(actual_tokens[:-1]) - self.assertEqual( - expected_tokens_without_trailing_newline, actual_tokens, - 'text must yield expected tokens: %s' % text) - - def test_can_lex_float(self): - self._assert_are_tokens_of_type( - '1. 1.e1 .1 1.2 1.2e3 1.2e+3 1.2e-3 1e2', Number.Float) - self._assert_tokens_match( - '1e2.1e2', - ((Number.Float, '1e2'), (Number.Float, '.1e2')) - ) - - def test_can_reject_almost_float(self): - self._assert_tokens_match( - '.e1', - ((Punctuation, '.'), (Name, 'e1'))) - - def test_can_lex_integer(self): - self._assert_are_tokens_of_type( - '1 23 456', Number.Integer) - - def test_can_lex_names(self): - self._assert_are_tokens_of_type( - u'thingy thingy123 _thingy _123', Name) - - def test_can_recover_after_unterminated_string(self): - self._assert_tokens_match( - '"x\nx', - ((String.Double, '"'), (String.Double, 'x'), (Error, '\n'), (Name, 'x')) - ) - - def test_can_recover_from_invalid_character(self): - self._assert_tokens_match( - 'a;bc\nd', - ((Name, 'a'), (Error, ';bc\n'), (Name, 'd')) - ) +@pytest.fixture(scope='module') +def lexer(): + yield VBScriptLexer() + + +def assert_are_tokens_of_type(lexer, examples, expected_token_type): + for test_number, example in enumerate(examples.split(), 1): + token_count = 0 + for token_type, token_value in lexer.get_tokens(example): + if token_type != Whitespace: + token_count += 1 + assert token_type == expected_token_type, \ + 'token_type #%d for %s is be %s but must be %s' % \ + (test_number, token_value, token_type, expected_token_type) + assert token_count == 1, \ + '%s must yield exactly 1 token instead of %d' % (example, token_count) + + +def assert_tokens_match(lexer, text, expected_tokens_without_trailing_newline): + actual_tokens = tuple(lexer.get_tokens(text)) + if (len(actual_tokens) >= 1) and (actual_tokens[-1] == (Whitespace, '\n')): + actual_tokens = tuple(actual_tokens[:-1]) + assert expected_tokens_without_trailing_newline == actual_tokens, \ + 'text must yield expected tokens: %s' % text + + +def test_can_lex_float(lexer): + assert_are_tokens_of_type(lexer, + '1. 
1.e1 .1 1.2 1.2e3 1.2e+3 1.2e-3 1e2', + Number.Float) + assert_tokens_match(lexer, + '1e2.1e2', + ((Number.Float, '1e2'), (Number.Float, '.1e2'))) + + +def test_can_reject_almost_float(lexer): + assert_tokens_match(lexer, '.e1', ((Punctuation, '.'), (Name, 'e1'))) + + +def test_can_lex_integer(lexer): + assert_are_tokens_of_type(lexer, '1 23 456', Number.Integer) + + +def test_can_lex_names(lexer): + assert_are_tokens_of_type(lexer, u'thingy thingy123 _thingy _123', Name) + + +def test_can_recover_after_unterminated_string(lexer): + assert_tokens_match(lexer, + '"x\nx', + ((String.Double, '"'), (String.Double, 'x'), + (Error, '\n'), (Name, 'x'))) + + +def test_can_recover_from_invalid_character(lexer): + assert_tokens_match(lexer, + 'a;bc\nd', + ((Name, 'a'), (Error, ';bc\n'), (Name, 'd'))) diff --git a/tests/test_basic_api.py b/tests/test_basic_api.py index 0d20fc24..0db65b51 100644 --- a/tests/test_basic_api.py +++ b/tests/test_basic_api.py @@ -3,14 +3,13 @@ Pygments basic API tests ~~~~~~~~~~~~~~~~~~~~~~~~ - :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from __future__ import print_function import random -import unittest from os import path import pytest @@ -253,7 +252,7 @@ def test_bare_class_handler(): assert False, 'nothing raised' -class FiltersTest(unittest.TestCase): +class TestFilters(object): def test_basic(self): filters_args = [ @@ -272,19 +271,18 @@ class FiltersTest(unittest.TestCase): with open(TESTFILE, 'rb') as fp: text = fp.read().decode('utf-8') tokens = list(lx.get_tokens(text)) - self.assertTrue(all(isinstance(t[1], text_type) - for t in tokens), - '%s filter did not return Unicode' % x) + assert all(isinstance(t[1], text_type) for t in tokens), \ + '%s filter did not return Unicode' % x roundtext = ''.join([t[1] for t in tokens]) if x not in ('whitespace', 'keywordcase', 'gobble'): # these filters change the text - self.assertEqual(roundtext, text, - "lexer roundtrip with %s filter failed" % x) + assert roundtext == text, \ + "lexer roundtrip with %s filter failed" % x def test_raiseonerror(self): lx = lexers.PythonLexer() lx.add_filter('raiseonerror', excclass=RuntimeError) - self.assertRaises(RuntimeError, list, lx.get_tokens('$')) + assert pytest.raises(RuntimeError, list, lx.get_tokens('$')) def test_whitespace(self): lx = lexers.PythonLexer() @@ -292,7 +290,7 @@ class FiltersTest(unittest.TestCase): with open(TESTFILE, 'rb') as fp: text = fp.read().decode('utf-8') lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))]) - self.assertFalse(' ' in lxtext) + assert ' ' not in lxtext def test_keywordcase(self): lx = lexers.PythonLexer() @@ -300,15 +298,15 @@ class FiltersTest(unittest.TestCase): with open(TESTFILE, 'rb') as fp: text = fp.read().decode('utf-8') lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))]) - self.assertTrue('Def' in lxtext and 'Class' in lxtext) + assert 'Def' in lxtext and 'Class' in lxtext def test_codetag(self): lx = lexers.PythonLexer() lx.add_filter('codetagify') text = u'# BUG: text' tokens = list(lx.get_tokens(text)) - self.assertEqual('# ', tokens[0][1]) - self.assertEqual('BUG', tokens[1][1]) + assert '# ' == tokens[0][1] + assert 'BUG' == tokens[1][1] def test_codetag_boundary(self): # ticket #368 @@ -316,4 +314,4 @@ class FiltersTest(unittest.TestCase): lx.add_filter('codetagify') text = u'# DEBUG: text' tokens = list(lx.get_tokens(text)) - self.assertEqual('# DEBUG: text', tokens[0][1]) + 
assert '# DEBUG: text' == tokens[0][1] diff --git a/tests/test_bibtex.py b/tests/test_bibtex.py index 5ad92db4..756a6589 100644 --- a/tests/test_bibtex.py +++ b/tests/test_bibtex.py @@ -3,234 +3,238 @@ BibTeX Test ~~~~~~~~~~~ - :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import textwrap -import unittest + +import pytest from pygments.lexers import BibTeXLexer, BSTLexer from pygments.token import Token -class BibTeXTest(unittest.TestCase): - def setUp(self): - self.lexer = BibTeXLexer() - - def testPreamble(self): - data = u'@PREAMBLE{"% some LaTeX code here"}' - tokens = [ - (Token.Name.Class, u'@PREAMBLE'), - (Token.Punctuation, u'{'), - (Token.String, u'"'), - (Token.String, u'% some LaTeX code here'), - (Token.String, u'"'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(list(self.lexer.get_tokens(data)), tokens) - - def testString(self): - data = u'@STRING(SCI = "Science")' - tokens = [ - (Token.Name.Class, u'@STRING'), - (Token.Punctuation, u'('), - (Token.Name.Attribute, u'SCI'), - (Token.Text, u' '), - (Token.Punctuation, u'='), - (Token.Text, u' '), - (Token.String, u'"'), - (Token.String, u'Science'), - (Token.String, u'"'), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - ] - self.assertEqual(list(self.lexer.get_tokens(data)), tokens) - - def testEntry(self): - data = u""" - This is a comment. - - @ARTICLE{ruckenstein-diffusion, - author = "Liu, Hongquin" # and # "Ruckenstein, Eli", - year = 1997, - month = JAN, - pages = "888-895" - } - """ - - tokens = [ - (Token.Comment, u'This is a comment.'), - (Token.Text, u'\n\n'), - (Token.Name.Class, u'@ARTICLE'), - (Token.Punctuation, u'{'), - (Token.Name.Label, u'ruckenstein-diffusion'), - (Token.Punctuation, u','), - (Token.Text, u'\n '), - (Token.Name.Attribute, u'author'), - (Token.Text, u' '), - (Token.Punctuation, u'='), - (Token.Text, u' '), - (Token.String, u'"'), - (Token.String, u'Liu, Hongquin'), - (Token.String, u'"'), - (Token.Text, u' '), - (Token.Punctuation, u'#'), - (Token.Text, u' '), - (Token.Name.Variable, u'and'), - (Token.Text, u' '), - (Token.Punctuation, u'#'), - (Token.Text, u' '), - (Token.String, u'"'), - (Token.String, u'Ruckenstein, Eli'), - (Token.String, u'"'), - (Token.Punctuation, u','), - (Token.Text, u'\n '), - (Token.Name.Attribute, u'year'), - (Token.Text, u' '), - (Token.Punctuation, u'='), - (Token.Text, u' '), - (Token.Number, u'1997'), - (Token.Punctuation, u','), - (Token.Text, u'\n '), - (Token.Name.Attribute, u'month'), - (Token.Text, u' '), - (Token.Punctuation, u'='), - (Token.Text, u' '), - (Token.Name.Variable, u'JAN'), - (Token.Punctuation, u','), - (Token.Text, u'\n '), - (Token.Name.Attribute, u'pages'), - (Token.Text, u' '), - (Token.Punctuation, u'='), - (Token.Text, u' '), - (Token.String, u'"'), - (Token.String, u'888-895'), - (Token.String, u'"'), - (Token.Text, u'\n'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(list(self.lexer.get_tokens(textwrap.dedent(data))), tokens) - - def testComment(self): - data = '@COMMENT{test}' - tokens = [ - (Token.Comment, u'@COMMENT'), - (Token.Comment, u'{test}'), - (Token.Text, u'\n'), - ] - self.assertEqual(list(self.lexer.get_tokens(data)), tokens) - - def testMissingBody(self): - data = '@ARTICLE xxx' - tokens = [ - (Token.Name.Class, u'@ARTICLE'), - (Token.Text, u' '), - (Token.Error, u'x'), - (Token.Error, u'x'), - (Token.Error, u'x'), - 
(Token.Text, u'\n'), - ] - self.assertEqual(list(self.lexer.get_tokens(data)), tokens) - - def testMismatchedBrace(self): - data = '@PREAMBLE(""}' - tokens = [ - (Token.Name.Class, u'@PREAMBLE'), - (Token.Punctuation, u'('), - (Token.String, u'"'), - (Token.String, u'"'), - (Token.Error, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(list(self.lexer.get_tokens(data)), tokens) - - -class BSTTest(unittest.TestCase): - def setUp(self): - self.lexer = BSTLexer() - - def testBasicBST(self): - data = """ - % BibTeX standard bibliography style `plain' - - INTEGERS { output.state before.all } - - FUNCTION {sort.format.title} - { 't := - "A " #2 - "An " #3 - "The " #4 t chop.word - chop.word +@pytest.fixture(scope='module') +def lexer(): + yield BibTeXLexer() + + +def test_preamble(lexer): + data = u'@PREAMBLE{"% some LaTeX code here"}' + tokens = [ + (Token.Name.Class, u'@PREAMBLE'), + (Token.Punctuation, u'{'), + (Token.String, u'"'), + (Token.String, u'% some LaTeX code here'), + (Token.String, u'"'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(data)) == tokens + + +def test_string(lexer): + data = u'@STRING(SCI = "Science")' + tokens = [ + (Token.Name.Class, u'@STRING'), + (Token.Punctuation, u'('), + (Token.Name.Attribute, u'SCI'), + (Token.Text, u' '), + (Token.Punctuation, u'='), + (Token.Text, u' '), + (Token.String, u'"'), + (Token.String, u'Science'), + (Token.String, u'"'), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(data)) == tokens + + +def test_entry(lexer): + data = u""" + This is a comment. + + @ARTICLE{ruckenstein-diffusion, + author = "Liu, Hongquin" # and # "Ruckenstein, Eli", + year = 1997, + month = JAN, + pages = "888-895" + } + """ + + tokens = [ + (Token.Comment, u'This is a comment.'), + (Token.Text, u'\n\n'), + (Token.Name.Class, u'@ARTICLE'), + (Token.Punctuation, u'{'), + (Token.Name.Label, u'ruckenstein-diffusion'), + (Token.Punctuation, u','), + (Token.Text, u'\n '), + (Token.Name.Attribute, u'author'), + (Token.Text, u' '), + (Token.Punctuation, u'='), + (Token.Text, u' '), + (Token.String, u'"'), + (Token.String, u'Liu, Hongquin'), + (Token.String, u'"'), + (Token.Text, u' '), + (Token.Punctuation, u'#'), + (Token.Text, u' '), + (Token.Name.Variable, u'and'), + (Token.Text, u' '), + (Token.Punctuation, u'#'), + (Token.Text, u' '), + (Token.String, u'"'), + (Token.String, u'Ruckenstein, Eli'), + (Token.String, u'"'), + (Token.Punctuation, u','), + (Token.Text, u'\n '), + (Token.Name.Attribute, u'year'), + (Token.Text, u' '), + (Token.Punctuation, u'='), + (Token.Text, u' '), + (Token.Number, u'1997'), + (Token.Punctuation, u','), + (Token.Text, u'\n '), + (Token.Name.Attribute, u'month'), + (Token.Text, u' '), + (Token.Punctuation, u'='), + (Token.Text, u' '), + (Token.Name.Variable, u'JAN'), + (Token.Punctuation, u','), + (Token.Text, u'\n '), + (Token.Name.Attribute, u'pages'), + (Token.Text, u' '), + (Token.Punctuation, u'='), + (Token.Text, u' '), + (Token.String, u'"'), + (Token.String, u'888-895'), + (Token.String, u'"'), + (Token.Text, u'\n'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(textwrap.dedent(data))) == tokens + + +def test_comment(lexer): + data = '@COMMENT{test}' + tokens = [ + (Token.Comment, u'@COMMENT'), + (Token.Comment, u'{test}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(data)) == tokens + + +def test_missing_body(lexer): + data = '@ARTICLE xxx' + tokens = [ + (Token.Name.Class, u'@ARTICLE'), + 
(Token.Text, u' '), + (Token.Error, u'x'), + (Token.Error, u'x'), + (Token.Error, u'x'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(data)) == tokens + + +def test_mismatched_brace(lexer): + data = '@PREAMBLE(""}' + tokens = [ + (Token.Name.Class, u'@PREAMBLE'), + (Token.Punctuation, u'('), + (Token.String, u'"'), + (Token.String, u'"'), + (Token.Error, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(data)) == tokens + + +def test_basic_bst(): + lexer = BSTLexer() + data = """ + % BibTeX standard bibliography style `plain' + + INTEGERS { output.state before.all } + + FUNCTION {sort.format.title} + { 't := + "A " #2 + "An " #3 + "The " #4 t chop.word chop.word - sortify - #1 global.max$ substring$ - } - - ITERATE {call.type$} - """ - tokens = [ - (Token.Comment.SingleLine, "% BibTeX standard bibliography style `plain'"), - (Token.Text, u'\n\n'), - (Token.Keyword, u'INTEGERS'), - (Token.Text, u' '), - (Token.Punctuation, u'{'), - (Token.Text, u' '), - (Token.Name.Variable, u'output.state'), - (Token.Text, u' '), - (Token.Name.Variable, u'before.all'), - (Token.Text, u' '), - (Token.Punctuation, u'}'), - (Token.Text, u'\n\n'), - (Token.Keyword, u'FUNCTION'), - (Token.Text, u' '), - (Token.Punctuation, u'{'), - (Token.Name.Variable, u'sort.format.title'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - (Token.Punctuation, u'{'), - (Token.Text, u' '), - (Token.Name.Function, u"'t"), - (Token.Text, u' '), - (Token.Name.Variable, u':='), - (Token.Text, u'\n'), - (Token.Literal.String, u'"A "'), - (Token.Text, u' '), - (Token.Literal.Number, u'#2'), - (Token.Text, u'\n '), - (Token.Literal.String, u'"An "'), - (Token.Text, u' '), - (Token.Literal.Number, u'#3'), - (Token.Text, u'\n '), - (Token.Literal.String, u'"The "'), - (Token.Text, u' '), - (Token.Literal.Number, u'#4'), - (Token.Text, u' '), - (Token.Name.Variable, u't'), - (Token.Text, u' '), - (Token.Name.Variable, u'chop.word'), - (Token.Text, u'\n '), - (Token.Name.Variable, u'chop.word'), - (Token.Text, u'\n'), - (Token.Name.Variable, u'chop.word'), - (Token.Text, u'\n'), - (Token.Name.Variable, u'sortify'), - (Token.Text, u'\n'), - (Token.Literal.Number, u'#1'), - (Token.Text, u' '), - (Token.Name.Builtin, u'global.max$'), - (Token.Text, u' '), - (Token.Name.Builtin, u'substring$'), - (Token.Text, u'\n'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n\n'), - (Token.Keyword, u'ITERATE'), - (Token.Text, u' '), - (Token.Punctuation, u'{'), - (Token.Name.Builtin, u'call.type$'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(list(self.lexer.get_tokens(textwrap.dedent(data))), tokens) + chop.word + sortify + #1 global.max$ substring$ + } + + ITERATE {call.type$} + """ + tokens = [ + (Token.Comment.SingleLine, "% BibTeX standard bibliography style `plain'"), + (Token.Text, u'\n\n'), + (Token.Keyword, u'INTEGERS'), + (Token.Text, u' '), + (Token.Punctuation, u'{'), + (Token.Text, u' '), + (Token.Name.Variable, u'output.state'), + (Token.Text, u' '), + (Token.Name.Variable, u'before.all'), + (Token.Text, u' '), + (Token.Punctuation, u'}'), + (Token.Text, u'\n\n'), + (Token.Keyword, u'FUNCTION'), + (Token.Text, u' '), + (Token.Punctuation, u'{'), + (Token.Name.Variable, u'sort.format.title'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + (Token.Punctuation, u'{'), + (Token.Text, u' '), + (Token.Name.Function, u"'t"), + (Token.Text, u' '), + (Token.Name.Variable, u':='), + (Token.Text, u'\n'), + (Token.Literal.String, u'"A "'), + (Token.Text, u' '), + 
(Token.Literal.Number, u'#2'), + (Token.Text, u'\n '), + (Token.Literal.String, u'"An "'), + (Token.Text, u' '), + (Token.Literal.Number, u'#3'), + (Token.Text, u'\n '), + (Token.Literal.String, u'"The "'), + (Token.Text, u' '), + (Token.Literal.Number, u'#4'), + (Token.Text, u' '), + (Token.Name.Variable, u't'), + (Token.Text, u' '), + (Token.Name.Variable, u'chop.word'), + (Token.Text, u'\n '), + (Token.Name.Variable, u'chop.word'), + (Token.Text, u'\n'), + (Token.Name.Variable, u'chop.word'), + (Token.Text, u'\n'), + (Token.Name.Variable, u'sortify'), + (Token.Text, u'\n'), + (Token.Literal.Number, u'#1'), + (Token.Text, u' '), + (Token.Name.Builtin, u'global.max$'), + (Token.Text, u' '), + (Token.Name.Builtin, u'substring$'), + (Token.Text, u'\n'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n\n'), + (Token.Keyword, u'ITERATE'), + (Token.Text, u' '), + (Token.Punctuation, u'{'), + (Token.Name.Builtin, u'call.type$'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(textwrap.dedent(data))) == tokens diff --git a/tests/test_cfm.py b/tests/test_cfm.py index 401ac78c..e3175215 100644 --- a/tests/test_cfm.py +++ b/tests/test_cfm.py @@ -3,43 +3,44 @@ Basic ColdfusionHtmlLexer Test ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ -import unittest +import pytest from pygments.token import Token from pygments.lexers import ColdfusionHtmlLexer -class ColdfusionHtmlLexerTest(unittest.TestCase): - - def setUp(self): - self.lexer = ColdfusionHtmlLexer() - - def testBasicComment(self): - fragment = u'' - expected = [ - (Token.Text, u''), - (Token.Comment.Multiline, u''), - (Token.Text, u'\n'), - ] - self.assertEqual(expected, list(self.lexer.get_tokens(fragment))) - - def testNestedComment(self): - fragment = u' --->' - expected = [ - (Token.Text, u''), - (Token.Comment.Multiline, u''), - (Token.Comment.Multiline, u' '), - (Token.Comment.Multiline, u'--->'), - (Token.Text, u'\n'), - ] - self.assertEqual(expected, list(self.lexer.get_tokens(fragment))) +@pytest.fixture(scope='module') +def lexer(): + yield ColdfusionHtmlLexer() + + +def test_basic_comment(lexer): + fragment = u'' + expected = [ + (Token.Text, u''), + (Token.Comment.Multiline, u''), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(fragment)) == expected + + +def test_nested_comment(lexer): + fragment = u' --->' + expected = [ + (Token.Text, u''), + (Token.Comment.Multiline, u''), + (Token.Comment.Multiline, u' '), + (Token.Comment.Multiline, u'--->'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(fragment)) == expected diff --git a/tests/test_clexer.py b/tests/test_clexer.py index 8841bf3e..a598096e 100644 --- a/tests/test_clexer.py +++ b/tests/test_clexer.py @@ -7,252 +7,259 @@ :license: BSD, see LICENSE for details. """ -import unittest import textwrap +import pytest + from pygments.token import Text, Number, Token from pygments.lexers import CLexer -class CLexerTest(unittest.TestCase): +@pytest.fixture(scope='module') +def lexer(): + yield CLexer() - def setUp(self): - self.lexer = CLexer() - def testNumbers(self): - code = '42 23.42 23. 
.42 023 0xdeadbeef 23e+42 42e-23' - wanted = [] - for item in zip([Number.Integer, Number.Float, Number.Float, - Number.Float, Number.Oct, Number.Hex, - Number.Float, Number.Float], code.split()): - wanted.append(item) - wanted.append((Text, ' ')) - wanted = wanted[:-1] + [(Text, '\n')] - self.assertEqual(list(self.lexer.get_tokens(code)), wanted) +def test_numbers(lexer): + code = '42 23.42 23. .42 023 0xdeadbeef 23e+42 42e-23' + wanted = [] + for item in zip([Number.Integer, Number.Float, Number.Float, + Number.Float, Number.Oct, Number.Hex, + Number.Float, Number.Float], code.split()): + wanted.append(item) + wanted.append((Text, ' ')) + wanted = wanted[:-1] + [(Text, '\n')] + assert list(lexer.get_tokens(code)) == wanted - def testSwitch(self): - fragment = u'''\ - int main() - { - switch (0) - { - case 0: - default: - ; - } - } - ''' - tokens = [ - (Token.Keyword.Type, u'int'), - (Token.Text, u' '), - (Token.Name.Function, u'main'), - (Token.Punctuation, u'('), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Punctuation, u'{'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'switch'), - (Token.Text, u' '), - (Token.Punctuation, u'('), - (Token.Literal.Number.Integer, u'0'), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Punctuation, u'{'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'case'), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'0'), - (Token.Operator, u':'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'default'), - (Token.Operator, u':'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Punctuation, u';'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(textwrap.dedent(fragment)))) - def testSwitchSpaceBeforeColon(self): - fragment = u'''\ - int main() +def test_switch(lexer): + fragment = u'''\ + int main() + { + switch (0) { - switch (0) - { - case 0 : - default : - ; - } + case 0: + default: + ; } - ''' - tokens = [ - (Token.Keyword.Type, u'int'), - (Token.Text, u' '), - (Token.Name.Function, u'main'), - (Token.Punctuation, u'('), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Punctuation, u'{'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'switch'), - (Token.Text, u' '), - (Token.Punctuation, u'('), - (Token.Literal.Number.Integer, u'0'), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Punctuation, u'{'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'case'), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'0'), - (Token.Text, u' '), - (Token.Operator, u':'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'default'), - (Token.Text, u' '), - (Token.Operator, u':'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Punctuation, u';'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(textwrap.dedent(fragment)))) + } + ''' + tokens = [ + (Token.Keyword.Type, u'int'), + (Token.Text, u' '), + (Token.Name.Function, u'main'), + (Token.Punctuation, u'('), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Punctuation, u'{'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'switch'), + (Token.Text, u' '), + 
(Token.Punctuation, u'('), + (Token.Literal.Number.Integer, u'0'), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Punctuation, u'{'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'case'), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'0'), + (Token.Operator, u':'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'default'), + (Token.Operator, u':'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Punctuation, u';'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens - def testLabel(self): - fragment = u'''\ - int main() - { - foo: - goto foo; - } - ''' - tokens = [ - (Token.Keyword.Type, u'int'), - (Token.Text, u' '), - (Token.Name.Function, u'main'), - (Token.Punctuation, u'('), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Punctuation, u'{'), - (Token.Text, u'\n'), - (Token.Name.Label, u'foo'), - (Token.Punctuation, u':'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'goto'), - (Token.Text, u' '), - (Token.Name, u'foo'), - (Token.Punctuation, u';'), - (Token.Text, u'\n'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(textwrap.dedent(fragment)))) - def testLabelSpaceBeforeColon(self): - fragment = u'''\ - int main() +def test_switch_space_before_colon(lexer): + fragment = u'''\ + int main() + { + switch (0) { - foo : - goto foo; + case 0 : + default : + ; } - ''' - tokens = [ - (Token.Keyword.Type, u'int'), - (Token.Text, u' '), - (Token.Name.Function, u'main'), - (Token.Punctuation, u'('), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Punctuation, u'{'), - (Token.Text, u'\n'), - (Token.Name.Label, u'foo'), - (Token.Text, u' '), - (Token.Punctuation, u':'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'goto'), - (Token.Text, u' '), - (Token.Name, u'foo'), - (Token.Punctuation, u';'), - (Token.Text, u'\n'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(textwrap.dedent(fragment)))) + } + ''' + tokens = [ + (Token.Keyword.Type, u'int'), + (Token.Text, u' '), + (Token.Name.Function, u'main'), + (Token.Punctuation, u'('), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Punctuation, u'{'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'switch'), + (Token.Text, u' '), + (Token.Punctuation, u'('), + (Token.Literal.Number.Integer, u'0'), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Punctuation, u'{'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'case'), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'0'), + (Token.Text, u' '), + (Token.Operator, u':'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'default'), + (Token.Text, u' '), + (Token.Operator, u':'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Punctuation, u';'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens + + +def test_label(lexer): + fragment = u'''\ + int main() + { + foo: + goto foo; + } + ''' + tokens = [ + (Token.Keyword.Type, u'int'), + (Token.Text, u' '), + (Token.Name.Function, u'main'), + 
(Token.Punctuation, u'('), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Punctuation, u'{'), + (Token.Text, u'\n'), + (Token.Name.Label, u'foo'), + (Token.Punctuation, u':'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'goto'), + (Token.Text, u' '), + (Token.Name, u'foo'), + (Token.Punctuation, u';'), + (Token.Text, u'\n'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens + + +def test_label_space_before_colon(lexer): + fragment = u'''\ + int main() + { + foo : + goto foo; + } + ''' + tokens = [ + (Token.Keyword.Type, u'int'), + (Token.Text, u' '), + (Token.Name.Function, u'main'), + (Token.Punctuation, u'('), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Punctuation, u'{'), + (Token.Text, u'\n'), + (Token.Name.Label, u'foo'), + (Token.Text, u' '), + (Token.Punctuation, u':'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'goto'), + (Token.Text, u' '), + (Token.Name, u'foo'), + (Token.Punctuation, u';'), + (Token.Text, u'\n'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens + + +def test_label_followed_by_statement(lexer): + fragment = u'''\ + int main() + { + foo:return 0; + goto foo; + } + ''' + tokens = [ + (Token.Keyword.Type, u'int'), + (Token.Text, u' '), + (Token.Name.Function, u'main'), + (Token.Punctuation, u'('), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Punctuation, u'{'), + (Token.Text, u'\n'), + (Token.Name.Label, u'foo'), + (Token.Punctuation, u':'), + (Token.Keyword, u'return'), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'0'), + (Token.Punctuation, u';'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'goto'), + (Token.Text, u' '), + (Token.Name, u'foo'), + (Token.Punctuation, u';'), + (Token.Text, u'\n'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens - def testLabelFollowedByStatement(self): - fragment = u'''\ - int main() - { - foo:return 0; - goto foo; - } - ''' - tokens = [ - (Token.Keyword.Type, u'int'), - (Token.Text, u' '), - (Token.Name.Function, u'main'), - (Token.Punctuation, u'('), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Punctuation, u'{'), - (Token.Text, u'\n'), - (Token.Name.Label, u'foo'), - (Token.Punctuation, u':'), - (Token.Keyword, u'return'), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'0'), - (Token.Punctuation, u';'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'goto'), - (Token.Text, u' '), - (Token.Name, u'foo'), - (Token.Punctuation, u';'), - (Token.Text, u'\n'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(textwrap.dedent(fragment)))) - def testPreprocFile(self): - fragment = u'#include \n' - tokens = [ - (Token.Comment.Preproc, u'#'), - (Token.Comment.Preproc, u'include'), - (Token.Text, u' '), - (Token.Comment.PreprocFile, u''), - (Token.Comment.Preproc, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) +def test_preproc_file(lexer): + fragment = u'#include \n' + tokens = [ + (Token.Comment.Preproc, u'#'), + (Token.Comment.Preproc, u'include'), + (Token.Text, u' '), + (Token.Comment.PreprocFile, u''), + (Token.Comment.Preproc, u'\n'), + ] + assert list(lexer.get_tokens(fragment)) == tokens - def testPreprocFile2(self): - fragment = u'#include "foo.h"\n' - tokens 
= [ - (Token.Comment.Preproc, u'#'), - (Token.Comment.Preproc, u'include'), - (Token.Text, u' '), - (Token.Comment.PreprocFile, u'"foo.h"'), - (Token.Comment.Preproc, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) +def test_preproc_file2(lexer): + fragment = u'#include "foo.h"\n' + tokens = [ + (Token.Comment.Preproc, u'#'), + (Token.Comment.Preproc, u'include'), + (Token.Text, u' '), + (Token.Comment.PreprocFile, u'"foo.h"'), + (Token.Comment.Preproc, u'\n'), + ] + assert list(lexer.get_tokens(fragment)) == tokens diff --git a/tests/test_cmdline.py b/tests/test_cmdline.py index 11b53896..d56e2ae5 100644 --- a/tests/test_cmdline.py +++ b/tests/test_cmdline.py @@ -3,7 +3,7 @@ Command line test ~~~~~~~~~~~~~~~~~ - :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ @@ -14,9 +14,10 @@ import os import re import sys import tempfile -import unittest from os import path +from pytest import raises + from pygments import cmdline, highlight from pygments.util import BytesIO, StringIO @@ -66,249 +67,253 @@ def run_cmdline(*args, **kwds): return (ret, _decode_output(out), _decode_output(err)) -class CmdLineTest(unittest.TestCase): - - def check_success(self, *cmdline, **kwds): - code, out, err = run_cmdline(*cmdline, **kwds) - self.assertEqual(code, 0) - self.assertEqual(err, '') - return out - - def check_failure(self, *cmdline, **kwds): - expected_code = kwds.pop('code', 1) - code, out, err = run_cmdline(*cmdline, **kwds) - self.assertEqual(code, expected_code) - self.assertEqual(out, '') - return err - - def test_normal(self): - # test that cmdline gives the same output as library api - from pygments.lexers import PythonLexer - from pygments.formatters import HtmlFormatter - filename = TESTFILE - with open(filename, 'rb') as fp: - code = fp.read() - - output = highlight(code, PythonLexer(), HtmlFormatter()) - - o = self.check_success('-lpython', '-fhtml', filename) - self.assertEqual(o, output) - - def test_stdin(self): - o = self.check_success('-lpython', '-fhtml', stdin=TESTCODE) - o = re.sub('<[^>]*>', '', o) - # rstrip is necessary since HTML inserts a \n after the last - self.assertEqual(o.rstrip(), TESTCODE.rstrip()) - - # guess if no lexer given - o = self.check_success('-fhtml', stdin=TESTCODE) - o = re.sub('<[^>]*>', '', o) - # rstrip is necessary since HTML inserts a \n after the last - self.assertEqual(o.rstrip(), TESTCODE.rstrip()) - - def test_outfile(self): - # test that output file works with and without encoding - fd, name = tempfile.mkstemp() - os.close(fd) - for opts in [['-fhtml', '-o', name, TESTFILE], - ['-flatex', '-o', name, TESTFILE], - ['-fhtml', '-o', name, '-O', 'encoding=utf-8', TESTFILE]]: - try: - self.check_success(*opts) - finally: - os.unlink(name) - - def test_load_from_file(self): - lexer_file = os.path.join(TESTDIR, 'support', 'python_lexer.py') - formatter_file = os.path.join(TESTDIR, 'support', 'html_formatter.py') - - # By default, use CustomLexer - o = self.check_success('-l', lexer_file, '-f', 'html', - '-x', stdin=TESTCODE) - o = re.sub('<[^>]*>', '', o) - # rstrip is necessary since HTML inserts a \n after the last - self.assertEqual(o.rstrip(), TESTCODE.rstrip()) - - # If user specifies a name, use it - o = self.check_success('-f', 'html', '-x', '-l', - lexer_file + ':LexerWrapper', stdin=TESTCODE) - o = re.sub('<[^>]*>', '', o) - # rstrip is necessary since HTML inserts a \n after the last - 
self.assertEqual(o.rstrip(), TESTCODE.rstrip()) - - # Should also work for formatters - o = self.check_success('-lpython', '-f', - formatter_file + ':HtmlFormatterWrapper', - '-x', stdin=TESTCODE) - o = re.sub('<[^>]*>', '', o) - # rstrip is necessary since HTML inserts a \n after the last - self.assertEqual(o.rstrip(), TESTCODE.rstrip()) - - def test_stream_opt(self): - o = self.check_success('-lpython', '-s', '-fterminal', stdin=TESTCODE) - o = re.sub(r'\x1b\[.*?m', '', o) - self.assertEqual(o.replace('\r\n', '\n'), TESTCODE) - - def test_h_opt(self): - o = self.check_success('-h') - self.assertTrue('Usage:' in o) - - def test_L_opt(self): - o = self.check_success('-L') - self.assertTrue('Lexers' in o and 'Formatters' in o and - 'Filters' in o and 'Styles' in o) - o = self.check_success('-L', 'lexer') - self.assertTrue('Lexers' in o and 'Formatters' not in o) - self.check_success('-L', 'lexers') - - def test_O_opt(self): - filename = TESTFILE - o = self.check_success('-Ofull=1,linenos=true,foo=bar', - '-fhtml', filename) - self.assertTrue('foo, bar=baz=,' in o) - - def test_F_opt(self): - filename = TESTFILE - o = self.check_success('-Fhighlight:tokentype=Name.Blubb,' - 'names=TESTFILE filename', - '-fhtml', filename) - self.assertTrue('foo, bar=baz=,' in o + + +def test_F_opt(): + filename = TESTFILE + o = check_success('-Fhighlight:tokentype=Name.Blubb,' + 'names=TESTFILE filename', '-fhtml', filename) + assert '(other : self) : Int\n' + '{%for field in %w(first_name middle_name last_name)%}\n' + 'cmp={{field.id}}<=>other.{{field.id}}\n' + 'return cmp if cmp!=0\n' + '{%end%}\n' + '0\n' + 'end\n') + tokens = [ + (Keyword, 'def'), + (Name.Function, '<=>'), + (Punctuation, '('), + (Name, 'other'), + (Text, ' '), + (Punctuation, ':'), + (Text, ' '), + (Keyword.Pseudo, 'self'), + (Punctuation, ')'), + (Text, ' '), + (Punctuation, ':'), + (Text, ' '), + (Name.Builtin, 'Int'), + (Text, '\n'), + (String.Interpol, '{%'), + (Keyword, 'for'), + (Text, ' '), + (Name, 'field'), + (Text, ' '), + (Keyword, 'in'), + (Text, ' '), + (String.Other, '%w('), + (String.Other, 'first_name middle_name last_name'), + (String.Other, ')'), + (String.Interpol, '%}'), + (Text, '\n'), + (Name, 'cmp'), + (Operator, '='), + (String.Interpol, '{{'), + (Name, 'field'), + (Operator, '.'), + (Name, 'id'), + (String.Interpol, '}}'), + (Operator, '<=>'), + (Name, 'other'), + (Operator, '.'), + (String.Interpol, '{{'), + (Name, 'field'), + (Operator, '.'), + (Name, 'id'), + (String.Interpol, '}}'), + (Text, '\n'), + (Keyword, 'return'), + (Text, ' '), + (Name, 'cmp'), + (Text, ' '), + (Keyword, 'if'), + (Text, ' '), + (Name, 'cmp'), + (Operator, '!='), + (Number.Integer, '0'), + (Text, '\n'), + (String.Interpol, '{%'), + (Keyword, 'end'), + (String.Interpol, '%}'), + (Text, '\n'), + (Number.Integer, '0'), + (Text, '\n'), + (Keyword, 'end'), + (Text, '\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens - def testChars(self): - for fragment in ["'a'", "'я'", "'\\u{1234}'", "'\n'"]: - self.assertEqual([(String.Char, fragment), (Text, '\n')], - list(self.lexer.get_tokens(fragment + '\n'))) - self.assertEqual(next(self.lexer.get_tokens("'abc'"))[0], Error) - def testMacro(self): - fragment = ( - 'def<=>(other : self) : Int\n' - '{%for field in %w(first_name middle_name last_name)%}\n' - 'cmp={{field.id}}<=>other.{{field.id}}\n' - 'return cmp if cmp!=0\n' - '{%end%}\n' - '0\n' - 'end\n') - tokens = [ - (Keyword, 'def'), - (Name.Function, '<=>'), - (Punctuation, '('), - (Name, 'other'), - (Text, ' '), - (Punctuation, 
':'), - (Text, ' '), - (Keyword.Pseudo, 'self'), - (Punctuation, ')'), - (Text, ' '), - (Punctuation, ':'), - (Text, ' '), - (Name.Builtin, 'Int'), - (Text, '\n'), - (String.Interpol, '{%'), - (Keyword, 'for'), - (Text, ' '), - (Name, 'field'), - (Text, ' '), - (Keyword, 'in'), - (Text, ' '), - (String.Other, '%w('), - (String.Other, 'first_name middle_name last_name'), - (String.Other, ')'), - (String.Interpol, '%}'), - (Text, '\n'), - (Name, 'cmp'), - (Operator, '='), - (String.Interpol, '{{'), - (Name, 'field'), - (Operator, '.'), - (Name, 'id'), - (String.Interpol, '}}'), - (Operator, '<=>'), - (Name, 'other'), - (Operator, '.'), - (String.Interpol, '{{'), - (Name, 'field'), - (Operator, '.'), - (Name, 'id'), - (String.Interpol, '}}'), - (Text, '\n'), - (Keyword, 'return'), - (Text, ' '), - (Name, 'cmp'), - (Text, ' '), - (Keyword, 'if'), - (Text, ' '), - (Name, 'cmp'), - (Operator, '!='), - (Number.Integer, '0'), - (Text, '\n'), - (String.Interpol, '{%'), - (Keyword, 'end'), - (String.Interpol, '%}'), - (Text, '\n'), - (Number.Integer, '0'), - (Text, '\n'), - (Keyword, 'end'), - (Text, '\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) +def test_lib(lexer): + fragment = ( + '@[Link("some")]\nlib LibSome\n' + '@[CallConvention("X86_StdCall")]\nfun foo="some.foo"(thing : Void*) : LibC::Int\n' + 'end\n') + tokens = [ + (Operator, '@['), + (Name.Decorator, 'Link'), + (Punctuation, '('), + (String.Double, '"'), + (String.Double, 'some'), + (String.Double, '"'), + (Punctuation, ')'), + (Operator, ']'), + (Text, '\n'), + (Keyword, 'lib'), + (Text, ' '), + (Name.Namespace, 'LibSome'), + (Text, '\n'), + (Operator, '@['), + (Name.Decorator, 'CallConvention'), + (Punctuation, '('), + (String.Double, '"'), + (String.Double, 'X86_StdCall'), + (String.Double, '"'), + (Punctuation, ')'), + (Operator, ']'), + (Text, '\n'), + (Keyword, 'fun'), + (Text, ' '), + (Name.Function, 'foo'), + (Operator, '='), + (String.Double, '"'), + (String.Double, 'some.foo'), + (String.Double, '"'), + (Punctuation, '('), + (Name, 'thing'), + (Text, ' '), + (Punctuation, ':'), + (Text, ' '), + (Name.Builtin, 'Void'), + (Operator, '*'), + (Punctuation, ')'), + (Text, ' '), + (Punctuation, ':'), + (Text, ' '), + (Name, 'LibC'), + (Operator, '::'), + (Name.Builtin, 'Int'), + (Text, '\n'), + (Keyword, 'end'), + (Text, '\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens - def testLib(self): - fragment = ( - '@[Link("some")]\nlib LibSome\n' - '@[CallConvention("X86_StdCall")]\nfun foo="some.foo"(thing : Void*) : LibC::Int\n' - 'end\n') - tokens = [ - (Operator, '@['), - (Name.Decorator, 'Link'), - (Punctuation, '('), - (String.Double, '"'), - (String.Double, 'some'), - (String.Double, '"'), - (Punctuation, ')'), - (Operator, ']'), - (Text, '\n'), - (Keyword, 'lib'), - (Text, ' '), - (Name.Namespace, 'LibSome'), - (Text, '\n'), - (Operator, '@['), - (Name.Decorator, 'CallConvention'), - (Punctuation, '('), - (String.Double, '"'), - (String.Double, 'X86_StdCall'), - (String.Double, '"'), - (Punctuation, ')'), - (Operator, ']'), - (Text, '\n'), - (Keyword, 'fun'), - (Text, ' '), - (Name.Function, 'foo'), - (Operator, '='), - (String.Double, '"'), - (String.Double, 'some.foo'), - (String.Double, '"'), - (Punctuation, '('), - (Name, 'thing'), - (Text, ' '), - (Punctuation, ':'), - (Text, ' '), - (Name.Builtin, 'Void'), - (Operator, '*'), - (Punctuation, ')'), - (Text, ' '), - (Punctuation, ':'), - (Text, ' '), - (Name, 'LibC'), - (Operator, '::'), - (Name.Builtin, 'Int'), - (Text, '\n'), - 
(Keyword, 'end'), - (Text, '\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - def testEscapedBracestring(self): - fragment = 'str.gsub(%r{\\\\\\\\}, "/")\n' - tokens = [ - (Name, 'str'), - (Operator, '.'), - (Name, 'gsub'), - (Punctuation, '('), - (String.Regex, '%r{'), - (String.Regex, '\\\\'), - (String.Regex, '\\\\'), - (String.Regex, '}'), - (Punctuation, ','), - (Text, ' '), - (String.Double, '"'), - (String.Double, '/'), - (String.Double, '"'), - (Punctuation, ')'), - (Text, '\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) +def test_escaped_bracestring(lexer): + fragment = 'str.gsub(%r{\\\\\\\\}, "/")\n' + tokens = [ + (Name, 'str'), + (Operator, '.'), + (Name, 'gsub'), + (Punctuation, '('), + (String.Regex, '%r{'), + (String.Regex, '\\\\'), + (String.Regex, '\\\\'), + (String.Regex, '}'), + (Punctuation, ','), + (Text, ' '), + (String.Double, '"'), + (String.Double, '/'), + (String.Double, '"'), + (Punctuation, ')'), + (Text, '\n'), + ] + assert list(lexer.get_tokens(fragment)) == tokens diff --git a/tests/test_csound.py b/tests/test_csound.py index d493bd04..a4ddaaeb 100644 --- a/tests/test_csound.py +++ b/tests/test_csound.py @@ -1,491 +1,510 @@ # -*- coding: utf-8 -*- """ Csound lexer tests - ~~~~~~~~~~~~~~~~~~~~ + ~~~~~~~~~~~~~~~~~~ - :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ -import unittest from textwrap import dedent -from pygments.token import Comment, Error, Keyword, Name, Number, Operator, Punctuation, \ - String, Text +import pytest + +from pygments.token import Comment, Error, Keyword, Name, Number, Operator, \ + Punctuation, String, Text from pygments.lexers import CsoundOrchestraLexer -class CsoundOrchestraTest(unittest.TestCase): +@pytest.fixture(scope='module') +def lexer(): + yield CsoundOrchestraLexer() + + +def test_comments(lexer): + fragment = dedent('''\ + /* + * comment + */ + ; comment + // comment + ''') + tokens = [ + (Comment.Multiline, u'/*\n * comment\n */'), + (Text, u'\n'), + (Comment.Single, u'; comment'), + (Text, u'\n'), + (Comment.Single, u'// comment'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_instrument_blocks(lexer): + fragment = dedent('''\ + instr/**/1,/**/N_a_M_e_,/**/+Name/**/// + iDuration = p3 + outc:a(aSignal) + endin + ''') + tokens = [ + (Keyword.Declaration, u'instr'), + (Comment.Multiline, u'/**/'), + (Name.Function, u'1'), + (Punctuation, u','), + (Comment.Multiline, u'/**/'), + (Name.Function, u'N_a_M_e_'), + (Punctuation, u','), + (Comment.Multiline, u'/**/'), + (Punctuation, u'+'), + (Name.Function, u'Name'), + (Comment.Multiline, u'/**/'), + (Comment.Single, u'//'), + (Text, u'\n'), + (Text, u' '), + (Keyword.Type, u'i'), + (Name, u'Duration'), + (Text, u' '), + (Operator, u'='), + (Text, u' '), + (Name.Variable.Instance, u'p3'), + (Text, u'\n'), + (Text, u' '), + (Name.Builtin, u'outc'), + (Punctuation, u':'), + (Keyword.Type, u'a'), + (Punctuation, u'('), + (Keyword.Type, u'a'), + (Name, u'Signal'), + (Punctuation, u')'), + (Text, u'\n'), + (Keyword.Declaration, u'endin'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens - def setUp(self): - self.lexer = CsoundOrchestraLexer() - self.maxDiff = None - def testComments(self): - fragment = dedent('''\ - /* - * comment - */ - ; comment - // comment - ''') +def test_user_defined_opcodes(lexer): + fragment = dedent('''\ + 
opcode/**/aUDO,/**/i[],/**/aik// + aUDO + endop + ''') + tokens = [ + (Keyword.Declaration, u'opcode'), + (Comment.Multiline, u'/**/'), + (Name.Function, u'aUDO'), + (Punctuation, u','), + (Comment.Multiline, u'/**/'), + (Keyword.Type, u'i[]'), + (Punctuation, u','), + (Comment.Multiline, u'/**/'), + (Keyword.Type, u'aik'), + (Comment.Single, u'//'), + (Text, u'\n'), + (Text, u' '), + (Name.Function, u'aUDO'), + (Text, u'\n'), + (Keyword.Declaration, u'endop'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_numbers(lexer): + fragment = '123 0123456789' + tokens = [ + (Number.Integer, u'123'), + (Text, u' '), + (Number.Integer, u'0123456789'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + fragment = '0xabcdef0123456789 0XABCDEF' + tokens = [ + (Keyword.Type, u'0x'), + (Number.Hex, u'abcdef0123456789'), + (Text, u' '), + (Keyword.Type, u'0X'), + (Number.Hex, u'ABCDEF'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + fragments = ['1e2', '3e+4', '5e-6', '7E8', '9E+0', '1E-2', '3.', '4.56', '.789'] + for fragment in fragments: tokens = [ - (Comment.Multiline, u'/*\n * comment\n */'), - (Text, u'\n'), - (Comment.Single, u'; comment'), - (Text, u'\n'), - (Comment.Single, u'// comment'), + (Number.Float, fragment), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testInstrumentBlocks(self): - fragment = dedent('''\ - instr/**/1,/**/N_a_M_e_,/**/+Name/**/// - iDuration = p3 - outc:a(aSignal) - endin - ''') + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_quoted_strings(lexer): + fragment = '"characters$MACRO."' + tokens = [ + (String, u'"'), + (String, u'characters'), + (Comment.Preproc, u'$MACRO.'), + (String, u'"'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_braced_strings(lexer): + fragment = dedent('''\ + {{ + characters$MACRO. 
+ }} + ''') + tokens = [ + (String, u'{{'), + (String, u'\ncharacters$MACRO.\n'), + (String, u'}}'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_escape_sequences(lexer): + for character in ['\\', 'a', 'b', 'n', 'r', 't', '"', '012', '345', '67']: + escapedCharacter = '\\' + character + fragment = '"' + escapedCharacter + '"' tokens = [ - (Keyword.Declaration, u'instr'), - (Comment.Multiline, u'/**/'), - (Name.Function, u'1'), - (Punctuation, u','), - (Comment.Multiline, u'/**/'), - (Name.Function, u'N_a_M_e_'), - (Punctuation, u','), - (Comment.Multiline, u'/**/'), - (Punctuation, u'+'), - (Name.Function, u'Name'), - (Comment.Multiline, u'/**/'), - (Comment.Single, u'//'), - (Text, u'\n'), - (Text, u' '), - (Keyword.Type, u'i'), - (Name, u'Duration'), - (Text, u' '), - (Operator, u'='), - (Text, u' '), - (Name.Variable.Instance, u'p3'), - (Text, u'\n'), - (Text, u' '), - (Name.Builtin, u'outc'), - (Punctuation, u':'), - (Keyword.Type, u'a'), - (Punctuation, u'('), - (Keyword.Type, u'a'), - (Name, u'Signal'), - (Punctuation, u')'), - (Text, u'\n'), - (Keyword.Declaration, u'endin'), + (String, u'"'), + (String.Escape, escapedCharacter), + (String, u'"'), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testUserDefinedOpcodes(self): - fragment = dedent('''\ - opcode/**/aUDO,/**/i[],/**/aik// - aUDO - endop - ''') + assert list(lexer.get_tokens(fragment)) == tokens + fragment = '{{' + escapedCharacter + '}}' tokens = [ - (Keyword.Declaration, u'opcode'), - (Comment.Multiline, u'/**/'), - (Name.Function, u'aUDO'), - (Punctuation, u','), - (Comment.Multiline, u'/**/'), - (Keyword.Type, u'i[]'), - (Punctuation, u','), - (Comment.Multiline, u'/**/'), - (Keyword.Type, u'aik'), - (Comment.Single, u'//'), - (Text, u'\n'), - (Text, u' '), - (Name.Function, u'aUDO'), - (Text, u'\n'), - (Keyword.Declaration, u'endop'), + (String, u'{{'), + (String.Escape, escapedCharacter), + (String, u'}}'), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + assert list(lexer.get_tokens(fragment)) == tokens + - def testNumbers(self): - fragment = '123 0123456789' +def test_operators(lexer): + fragments = ['+', '-', '~', u'¬', '!', '*', '/', '^', '%', '<<', '>>', '<', '>', + '<=', '>=', '==', '!=', '&', '#', '|', '&&', '||', '?', ':', '+=', + '-=', '*=', '/='] + for fragment in fragments: tokens = [ - (Number.Integer, u'123'), - (Text, u' '), - (Number.Integer, u'0123456789'), + (Operator, fragment), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - fragment = '0xabcdef0123456789 0XABCDEF' + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_global_value_identifiers(lexer): + for fragment in ['0dbfs', 'A4', 'kr', 'ksmps', 'nchnls', 'nchnls_i', 'sr']: tokens = [ - (Keyword.Type, u'0x'), - (Number.Hex, u'abcdef0123456789'), - (Text, u' '), - (Keyword.Type, u'0X'), - (Number.Hex, u'ABCDEF'), + (Name.Variable.Global, fragment), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - fragments = ['1e2', '3e+4', '5e-6', '7E8', '9E+0', '1E-2', '3.', '4.56', '.789'] - for fragment in fragments: - tokens = [ - (Number.Float, fragment), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + assert list(lexer.get_tokens(fragment)) == tokens - def testQuotedStrings(self): - fragment = '"characters$MACRO."' + +def test_keywords(lexer): + fragments = ['do', 'else', 'elseif', 'endif', 'enduntil', 'fi', 'if', 
'ithen', + 'kthen', 'od', 'then', 'until', 'while'] + for fragment in fragments: tokens = [ - (String, u'"'), - (String, u'characters'), - (Comment.Preproc, u'$MACRO.'), - (String, u'"'), + (Keyword, fragment), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testBracedStrings(self): - fragment = dedent('''\ - {{ - characters$MACRO. - }} - ''') + assert list(lexer.get_tokens(fragment)) == tokens + for fragment in ['return', 'rireturn']: tokens = [ - (String, u'{{'), - (String, u'\ncharacters$MACRO.\n'), - (String, u'}}'), + (Keyword.Pseudo, fragment), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + assert list(lexer.get_tokens(fragment)) == tokens + - def testEscapeSequences(self): - for character in ['\\', 'a', 'b', 'n', 'r', 't', '"', '012', '345', '67']: - escapedCharacter = '\\' + character - fragment = '"' + escapedCharacter + '"' +def test_labels(lexer): + fragment = dedent('''\ + aLabel: + label2: + ''') + tokens = [ + (Name.Label, u'aLabel'), + (Punctuation, u':'), + (Text, u'\n'), + (Text, u' '), + (Name.Label, u'label2'), + (Punctuation, u':'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_printks_and_prints_escape_sequences(lexer): + escapedCharacters = ['%!', '%%', '%n', '%N', '%r', '%R', '%t', '%T', '\\\\a', + '\\\\A', '\\\\b', '\\\\B', '\\\\n', '\\\\N', '\\\\r', + '\\\\R', '\\\\t', '\\\\T'] + for opcode in ['printks', 'prints']: + for escapedCharacter in escapedCharacters: + fragment = opcode + ' "' + escapedCharacter + '"' tokens = [ + (Name.Builtin, opcode), + (Text, u' '), (String, u'"'), (String.Escape, escapedCharacter), (String, u'"'), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - fragment = '{{' + escapedCharacter + '}}' - tokens = [ - (String, u'{{'), - (String.Escape, escapedCharacter), - (String, u'}}'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + assert list(lexer.get_tokens(fragment)) == tokens - def testOperators(self): - fragments = ['+', '-', '~', u'¬', '!', '*', '/', '^', '%', '<<', '>>', '<', '>', - '<=', '>=', '==', '!=', '&', '#', '|', '&&', '||', '?', ':', '+=', - '-=', '*=', '/='] - for fragment in fragments: - tokens = [ - (Operator, fragment), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testGlobalValueIdentifiers(self): - for fragment in ['0dbfs', 'A4', 'kr', 'ksmps', 'nchnls', 'nchnls_i', 'sr']: - tokens = [ - (Name.Variable.Global, fragment), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - def testKeywords(self): - fragments = ['do', 'else', 'elseif', 'endif', 'enduntil', 'fi', 'if', 'ithen', - 'kthen', 'od', 'then', 'until', 'while'] - for fragment in fragments: - tokens = [ - (Keyword, fragment), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - for fragment in ['return', 'rireturn']: - tokens = [ - (Keyword.Pseudo, fragment), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testLabels(self): - fragment = dedent('''\ - aLabel: - label2: - ''') +def test_goto_statements(lexer): + for keyword in ['goto', 'igoto', 'kgoto']: + fragment = keyword + ' aLabel' tokens = [ - (Name.Label, u'aLabel'), - (Punctuation, u':'), - (Text, u'\n'), + (Keyword, keyword), (Text, u' '), - (Name.Label, u'label2'), - (Punctuation, u':'), + (Name.Label, u'aLabel'), (Text, u'\n') ] - self.assertEqual(tokens, 
list(self.lexer.get_tokens(fragment))) - - def testPrintksAndPrintsEscapeSequences(self): - escapedCharacters = ['%!', '%%', '%n', '%N', '%r', '%R', '%t', '%T', '\\\\a', - '\\\\A', '\\\\b', '\\\\B', '\\\\n', '\\\\N', '\\\\r', - '\\\\R', '\\\\t', '\\\\T'] - for opcode in ['printks', 'prints']: - for escapedCharacter in escapedCharacters: - fragment = opcode + ' "' + escapedCharacter + '"' - tokens = [ - (Name.Builtin, opcode), - (Text, u' '), - (String, u'"'), - (String.Escape, escapedCharacter), - (String, u'"'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testGotoStatements(self): - for keyword in ['goto', 'igoto', 'kgoto']: - fragment = keyword + ' aLabel' - tokens = [ - (Keyword, keyword), - (Text, u' '), - (Name.Label, u'aLabel'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - for opcode in ['reinit', 'rigoto', 'tigoto']: - fragment = opcode + ' aLabel' - tokens = [ - (Keyword.Pseudo, opcode), - (Text, u' '), - (Name.Label, u'aLabel'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - for opcode in ['cggoto', 'cigoto', 'cingoto', 'ckgoto', 'cngoto', 'cnkgoto']: - fragment = opcode + ' 1==0, aLabel' - tokens = [ - (Keyword.Pseudo, opcode), - (Text, u' '), - (Number.Integer, u'1'), - (Operator, u'=='), - (Number.Integer, u'0'), - (Punctuation, u','), - (Text, u' '), - (Name.Label, u'aLabel'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - fragment = 'timout 0, 0, aLabel' + assert list(lexer.get_tokens(fragment)) == tokens + for opcode in ['reinit', 'rigoto', 'tigoto']: + fragment = opcode + ' aLabel' tokens = [ - (Keyword.Pseudo, 'timout'), + (Keyword.Pseudo, opcode), (Text, u' '), - (Number.Integer, u'0'), - (Punctuation, u','), + (Name.Label, u'aLabel'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + for opcode in ['cggoto', 'cigoto', 'cingoto', 'ckgoto', 'cngoto', 'cnkgoto']: + fragment = opcode + ' 1==0, aLabel' + tokens = [ + (Keyword.Pseudo, opcode), (Text, u' '), + (Number.Integer, u'1'), + (Operator, u'=='), (Number.Integer, u'0'), (Punctuation, u','), (Text, u' '), (Name.Label, u'aLabel'), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - for opcode in ['loop_ge', 'loop_gt', 'loop_le', 'loop_lt']: - fragment = opcode + ' 0, 0, 0, aLabel' - tokens = [ - (Keyword.Pseudo, opcode), - (Text, u' '), - (Number.Integer, u'0'), - (Punctuation, u','), - (Text, u' '), - (Number.Integer, u'0'), - (Punctuation, u','), - (Text, u' '), - (Number.Integer, u'0'), - (Punctuation, u','), - (Text, u' '), - (Name.Label, u'aLabel'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testIncludeDirectives(self): - for character in ['"', '|']: - fragment = '#include/**/' + character + 'file.udo' + character - tokens = [ - (Comment.Preproc, u'#include'), - (Comment.Multiline, u'/**/'), - (String, character + u'file.udo' + character), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testObjectLikeMacroDefinitions(self): - fragment = dedent('''\ - # \tdefine MACRO#macro_body# - #define/**/ - MACRO/**/ - #\\#macro - body\\## - ''') + assert list(lexer.get_tokens(fragment)) == tokens + fragment = 'timout 0, 0, aLabel' + tokens = [ + (Keyword.Pseudo, 'timout'), + (Text, u' '), + (Number.Integer, u'0'), + (Punctuation, u','), + (Text, u' '), + (Number.Integer, u'0'), + (Punctuation, u','), + 
(Text, u' '), + (Name.Label, u'aLabel'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + for opcode in ['loop_ge', 'loop_gt', 'loop_le', 'loop_lt']: + fragment = opcode + ' 0, 0, 0, aLabel' tokens = [ - (Comment.Preproc, u'# \tdefine'), + (Keyword.Pseudo, opcode), (Text, u' '), - (Comment.Preproc, u'MACRO'), - (Punctuation, u'#'), - (Comment.Preproc, u'macro_body'), - (Punctuation, u'#'), - (Text, u'\n'), - (Comment.Preproc, u'#define'), - (Comment.Multiline, u'/**/'), - (Text, u'\n'), - (Comment.Preproc, u'MACRO'), - (Comment.Multiline, u'/**/'), - (Text, u'\n'), - (Punctuation, u'#'), - (Comment.Preproc, u'\\#'), - (Comment.Preproc, u'macro\nbody'), - (Comment.Preproc, u'\\#'), - (Punctuation, u'#'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testFunctionLikeMacroDefinitions(self): - fragment = dedent('''\ - #define MACRO(ARG1#ARG2) #macro_body# - #define/**/ - MACRO(ARG1'ARG2' ARG3)/**/ - #\\#macro - body\\## - ''') - tokens = [ - (Comment.Preproc, u'#define'), + (Number.Integer, u'0'), + (Punctuation, u','), (Text, u' '), - (Comment.Preproc, u'MACRO'), - (Punctuation, u'('), - (Comment.Preproc, u'ARG1'), - (Punctuation, u'#'), - (Comment.Preproc, u'ARG2'), - (Punctuation, u')'), + (Number.Integer, u'0'), + (Punctuation, u','), (Text, u' '), - (Punctuation, u'#'), - (Comment.Preproc, u'macro_body'), - (Punctuation, u'#'), - (Text, u'\n'), - (Comment.Preproc, u'#define'), - (Comment.Multiline, u'/**/'), - (Text, u'\n'), - (Comment.Preproc, u'MACRO'), - (Punctuation, u'('), - (Comment.Preproc, u'ARG1'), - (Punctuation, u"'"), - (Comment.Preproc, u'ARG2'), - (Punctuation, u"'"), + (Number.Integer, u'0'), + (Punctuation, u','), (Text, u' '), - (Comment.Preproc, u'ARG3'), - (Punctuation, u')'), - (Comment.Multiline, u'/**/'), - (Text, u'\n'), - (Punctuation, u'#'), - (Comment.Preproc, u'\\#'), - (Comment.Preproc, u'macro\nbody'), - (Comment.Preproc, u'\\#'), - (Punctuation, u'#'), + (Name.Label, u'aLabel'), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + assert list(lexer.get_tokens(fragment)) == tokens - def testMacroPreprocessorDirectives(self): - for directive in ['#ifdef', '#ifndef', '#undef']: - fragment = directive + ' MACRO' - tokens = [ - (Comment.Preproc, directive), - (Text, u' '), - (Comment.Preproc, u'MACRO'), - (Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testOtherPreprocessorDirectives(self): - fragment = dedent('''\ - #else - #end - #endif - ### - @ \t12345 - @@ \t67890 - ''') + +def test_include_directives(lexer): + for character in ['"', '|']: + fragment = '#include/**/' + character + 'file.udo' + character tokens = [ - (Comment.Preproc, u'#else'), - (Text, u'\n'), - (Comment.Preproc, u'#end'), - (Text, u'\n'), - (Comment.Preproc, u'#endif'), - (Text, u'\n'), - (Comment.Preproc, u'###'), - (Text, u'\n'), - (Comment.Preproc, u'@ \t12345'), - (Text, u'\n'), - (Comment.Preproc, u'@@ \t67890'), + (Comment.Preproc, u'#include'), + (Comment.Multiline, u'/**/'), + (String, character + u'file.udo' + character), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + assert list(lexer.get_tokens(fragment)) == tokens + - def testFunctionLikeMacros(self): - fragment = "$MACRO.(((x#y\\)))' \"(#'x)\\)x\\))\"# {{x\\))x)\\)(#'}});" +def test_object_like_macro_definitions(lexer): + fragment = dedent('''\ + # \tdefine MACRO#macro_body# + #define/**/ + MACRO/**/ + #\\#macro + body\\## + ''') + tokens = [ + 
(Comment.Preproc, u'# \tdefine'), + (Text, u' '), + (Comment.Preproc, u'MACRO'), + (Punctuation, u'#'), + (Comment.Preproc, u'macro_body'), + (Punctuation, u'#'), + (Text, u'\n'), + (Comment.Preproc, u'#define'), + (Comment.Multiline, u'/**/'), + (Text, u'\n'), + (Comment.Preproc, u'MACRO'), + (Comment.Multiline, u'/**/'), + (Text, u'\n'), + (Punctuation, u'#'), + (Comment.Preproc, u'\\#'), + (Comment.Preproc, u'macro\nbody'), + (Comment.Preproc, u'\\#'), + (Punctuation, u'#'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_function_like_macro_definitions(lexer): + fragment = dedent('''\ + #define MACRO(ARG1#ARG2) #macro_body# + #define/**/ + MACRO(ARG1'ARG2' ARG3)/**/ + #\\#macro + body\\## + ''') + tokens = [ + (Comment.Preproc, u'#define'), + (Text, u' '), + (Comment.Preproc, u'MACRO'), + (Punctuation, u'('), + (Comment.Preproc, u'ARG1'), + (Punctuation, u'#'), + (Comment.Preproc, u'ARG2'), + (Punctuation, u')'), + (Text, u' '), + (Punctuation, u'#'), + (Comment.Preproc, u'macro_body'), + (Punctuation, u'#'), + (Text, u'\n'), + (Comment.Preproc, u'#define'), + (Comment.Multiline, u'/**/'), + (Text, u'\n'), + (Comment.Preproc, u'MACRO'), + (Punctuation, u'('), + (Comment.Preproc, u'ARG1'), + (Punctuation, u"'"), + (Comment.Preproc, u'ARG2'), + (Punctuation, u"'"), + (Text, u' '), + (Comment.Preproc, u'ARG3'), + (Punctuation, u')'), + (Comment.Multiline, u'/**/'), + (Text, u'\n'), + (Punctuation, u'#'), + (Comment.Preproc, u'\\#'), + (Comment.Preproc, u'macro\nbody'), + (Comment.Preproc, u'\\#'), + (Punctuation, u'#'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_macro_preprocessor_directives(lexer): + for directive in ['#ifdef', '#ifndef', '#undef']: + fragment = directive + ' MACRO' tokens = [ - (Comment.Preproc, u'$MACRO.'), - (Punctuation, u'('), - (Comment.Preproc, u'('), - (Comment.Preproc, u'('), - (Comment.Preproc, u'x#y\\)'), - (Comment.Preproc, u')'), - (Comment.Preproc, u')'), - (Punctuation, u"'"), - (Comment.Preproc, u' '), - (String, u'"'), - (Error, u'('), - (Error, u'#'), - (Error, u"'"), - (String, u'x'), - (Error, u')'), - (Comment.Preproc, u'\\)'), - (String, u'x'), - (Comment.Preproc, u'\\)'), - (Error, u')'), - (String, u'"'), - (Punctuation, u'#'), - (Comment.Preproc, u' '), - (String, u'{{'), - (String, u'x'), - (Comment.Preproc, u'\\)'), - (Error, u')'), - (String, u'x'), - (Error, u')'), - (Comment.Preproc, u'\\)'), - (Error, u'('), - (Error, u'#'), - (Error, u"'"), - (String, u'}}'), - (Punctuation, u')'), - (Comment.Single, u';'), + (Comment.Preproc, directive), + (Text, u' '), + (Comment.Preproc, u'MACRO'), (Text, u'\n') ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + assert list(lexer.get_tokens(fragment)) == tokens - def testName(self): - fragment = 'kG:V' - tokens = [ - (Keyword.Type, 'k'), - (Name, 'G'), - (Punctuation, ':'), - (Name, 'V'), - (Text, '\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + +def test_other_preprocessor_directives(lexer): + fragment = dedent('''\ + #else + #end + #endif + ### + @ \t12345 + @@ \t67890 + ''') + tokens = [ + (Comment.Preproc, u'#else'), + (Text, u'\n'), + (Comment.Preproc, u'#end'), + (Text, u'\n'), + (Comment.Preproc, u'#endif'), + (Text, u'\n'), + (Comment.Preproc, u'###'), + (Text, u'\n'), + (Comment.Preproc, u'@ \t12345'), + (Text, u'\n'), + (Comment.Preproc, u'@@ \t67890'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_function_like_macros(lexer): 
+ fragment = "$MACRO.(((x#y\\)))' \"(#'x)\\)x\\))\"# {{x\\))x)\\)(#'}});" + tokens = [ + (Comment.Preproc, u'$MACRO.'), + (Punctuation, u'('), + (Comment.Preproc, u'('), + (Comment.Preproc, u'('), + (Comment.Preproc, u'x#y\\)'), + (Comment.Preproc, u')'), + (Comment.Preproc, u')'), + (Punctuation, u"'"), + (Comment.Preproc, u' '), + (String, u'"'), + (Error, u'('), + (Error, u'#'), + (Error, u"'"), + (String, u'x'), + (Error, u')'), + (Comment.Preproc, u'\\)'), + (String, u'x'), + (Comment.Preproc, u'\\)'), + (Error, u')'), + (String, u'"'), + (Punctuation, u'#'), + (Comment.Preproc, u' '), + (String, u'{{'), + (String, u'x'), + (Comment.Preproc, u'\\)'), + (Error, u')'), + (String, u'x'), + (Error, u')'), + (Comment.Preproc, u'\\)'), + (Error, u'('), + (Error, u'#'), + (Error, u"'"), + (String, u'}}'), + (Punctuation, u')'), + (Comment.Single, u';'), + (Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_name(lexer): + fragment = 'kG:V' + tokens = [ + (Keyword.Type, 'k'), + (Name, 'G'), + (Punctuation, ':'), + (Name, 'V'), + (Text, '\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens diff --git a/tests/test_data.py b/tests/test_data.py index be371419..b590c126 100644 --- a/tests/test_data.py +++ b/tests/test_data.py @@ -7,94 +7,98 @@ :license: BSD, see LICENSE for details. """ -import unittest +import pytest from pygments.lexers import JsonLexer, JsonBareObjectLexer from pygments.token import Token -class JsonTest(unittest.TestCase): - def setUp(self): - self.lexer = JsonLexer() +@pytest.fixture(scope='module') +def lexer(): + yield JsonLexer() - def testBasic(self): - fragment = u'{"foo": "bar", "foo2": [1, 2, 3]}\n' - tokens = [ - (Token.Punctuation, u'{'), - (Token.Name.Tag, u'"foo"'), - (Token.Punctuation, u':'), - (Token.Text, u' '), - (Token.Literal.String.Double, u'"bar"'), - (Token.Punctuation, u','), - (Token.Text, u' '), - (Token.Name.Tag, u'"foo2"'), - (Token.Punctuation, u':'), - (Token.Text, u' '), - (Token.Punctuation, u'['), - (Token.Literal.Number.Integer, u'1'), - (Token.Punctuation, u','), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'2'), - (Token.Punctuation, u','), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'3'), - (Token.Punctuation, u']'), - (Token.Punctuation, u'}'), - (Token.Text, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) -class JsonBareObjectTest(unittest.TestCase): - def setUp(self): - self.lexer = JsonBareObjectLexer() +@pytest.fixture(scope='module') +def lexer_bare(): + yield JsonBareObjectLexer() - def testBasic(self): - # This is the same as testBasic for JsonLexer above, except the - # enclosing curly braces are removed. - fragment = u'"foo": "bar", "foo2": [1, 2, 3]\n' - tokens = [ - (Token.Name.Tag, u'"foo"'), - (Token.Punctuation, u':'), - (Token.Text, u' '), - (Token.Literal.String.Double, u'"bar"'), - (Token.Punctuation, u','), - (Token.Text, u' '), - (Token.Name.Tag, u'"foo2"'), - (Token.Punctuation, u':'), - (Token.Text, u' '), - (Token.Punctuation, u'['), - (Token.Literal.Number.Integer, u'1'), - (Token.Punctuation, u','), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'2'), - (Token.Punctuation, u','), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'3'), - (Token.Punctuation, u']'), - (Token.Text, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - def testClosingCurly(self): - # This can be an Error token, but should not be a can't-pop-from-stack - # exception. 
- fragment = '}"a"\n' - tokens = [ - (Token.Error, '}'), - (Token.Name.Tag, '"a"'), - (Token.Text, '\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) +def test_basic_json(lexer): + fragment = u'{"foo": "bar", "foo2": [1, 2, 3]}\n' + tokens = [ + (Token.Punctuation, u'{'), + (Token.Name.Tag, u'"foo"'), + (Token.Punctuation, u':'), + (Token.Text, u' '), + (Token.Literal.String.Double, u'"bar"'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Name.Tag, u'"foo2"'), + (Token.Punctuation, u':'), + (Token.Text, u' '), + (Token.Punctuation, u'['), + (Token.Literal.Number.Integer, u'1'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'2'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'3'), + (Token.Punctuation, u']'), + (Token.Punctuation, u'}'), + (Token.Text, u'\n'), + ] + assert list(lexer.get_tokens(fragment)) == tokens - def testClosingCurlyInValue(self): - fragment = '"": ""}\n' - tokens = [ - (Token.Name.Tag, '""'), - (Token.Punctuation, ':'), - (Token.Text, ' '), - (Token.Literal.String.Double, '""'), - (Token.Error, '}'), - (Token.Text, '\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) +def test_basic_bare(lexer_bare): + # This is the same as testBasic for JsonLexer above, except the + # enclosing curly braces are removed. + fragment = u'"foo": "bar", "foo2": [1, 2, 3]\n' + tokens = [ + (Token.Name.Tag, u'"foo"'), + (Token.Punctuation, u':'), + (Token.Text, u' '), + (Token.Literal.String.Double, u'"bar"'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Name.Tag, u'"foo2"'), + (Token.Punctuation, u':'), + (Token.Text, u' '), + (Token.Punctuation, u'['), + (Token.Literal.Number.Integer, u'1'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'2'), + (Token.Punctuation, u','), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'3'), + (Token.Punctuation, u']'), + (Token.Text, u'\n'), + ] + assert list(lexer_bare.get_tokens(fragment)) == tokens + + +def test_closing_curly(lexer_bare): + # This can be an Error token, but should not be a can't-pop-from-stack + # exception. + fragment = '}"a"\n' + tokens = [ + (Token.Error, '}'), + (Token.Name.Tag, '"a"'), + (Token.Text, '\n'), + ] + assert list(lexer_bare.get_tokens(fragment)) == tokens + + +def test_closing_curly_in_value(lexer_bare): + fragment = '"": ""}\n' + tokens = [ + (Token.Name.Tag, '""'), + (Token.Punctuation, ':'), + (Token.Text, ' '), + (Token.Literal.String.Double, '""'), + (Token.Error, '}'), + (Token.Text, '\n'), + ] + assert list(lexer_bare.get_tokens(fragment)) == tokens diff --git a/tests/test_ezhil.py b/tests/test_ezhil.py index 15cc13b1..8047a30a 100644 --- a/tests/test_ezhil.py +++ b/tests/test_ezhil.py @@ -7,177 +7,171 @@ :license: BSD, see LICENSE for details. 
""" -import unittest +import pytest from pygments.token import Operator, Number, Text, Token from pygments.lexers import EzhilLexer -class EzhilTest(unittest.TestCase): +@pytest.fixture(scope='module') +def lexer(): + yield EzhilLexer() - def setUp(self): - self.lexer = EzhilLexer() - self.maxDiff = None - - def testSum(self): - fragment = u'1+3\n' - tokens = [ - (Number.Integer, u'1'), - (Operator, u'+'), - (Number.Integer, u'3'), - (Text, u'\n'), - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - - def testGCDExpr(self): - fragment = u'1^3+(5-5)*gcd(a,b)\n' - tokens = [ - (Token.Number.Integer,u'1'), - (Token.Operator,u'^'), - (Token.Literal.Number.Integer, u'3'), - (Token.Operator, u'+'), - (Token.Punctuation, u'('), - (Token.Literal.Number.Integer, u'5'), - (Token.Operator, u'-'), - (Token.Literal.Number.Integer, u'5'), - (Token.Punctuation, u')'), - (Token.Operator, u'*'), - (Token.Name, u'gcd'), - (Token.Punctuation, u'('), - (Token.Name, u'a'), - (Token.Operator, u','), - (Token.Name, u'b'), - (Token.Punctuation, u')'), - (Token.Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - def testIfStatement(self): - fragment = u"""@( 0 > 3 ) ஆனால் - பதிப்பி "wont print" +def test_sum(lexer): + fragment = u'1+3\n' + tokens = [ + (Number.Integer, u'1'), + (Operator, u'+'), + (Number.Integer, u'3'), + (Text, u'\n'), + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_gcd_expr(lexer): + fragment = u'1^3+(5-5)*gcd(a,b)\n' + tokens = [ + (Token.Number.Integer, u'1'), + (Token.Operator, u'^'), + (Token.Literal.Number.Integer, u'3'), + (Token.Operator, u'+'), + (Token.Punctuation, u'('), + (Token.Literal.Number.Integer, u'5'), + (Token.Operator, u'-'), + (Token.Literal.Number.Integer, u'5'), + (Token.Punctuation, u')'), + (Token.Operator, u'*'), + (Token.Name, u'gcd'), + (Token.Punctuation, u'('), + (Token.Name, u'a'), + (Token.Operator, u','), + (Token.Name, u'b'), + (Token.Punctuation, u')'), + (Token.Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + + +def test_if_statement(lexer): + fragment = u"""@( 0 > 3 ) ஆனால் + பதிப்பி "wont print" முடி""" - tokens = [ - (Token.Operator, u'@'), - (Token.Punctuation, u'('), - (Token.Text, u' '), - (Token.Literal.Number.Integer,u'0'), - (Token.Text, u' '), - (Token.Operator,u'>'), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'3'), - (Token.Text, u' '), - (Token.Punctuation, u')'), - (Token.Text, u' '), - (Token.Keyword, u'ஆனால்'), - (Token.Text, u'\n'), - (Token.Text, u'\t'), - (Token.Keyword, u'பதிப்பி'), - (Token.Text, u' '), - (Token.Literal.String, u'"wont print"'), - (Token.Text, u'\t'), - (Token.Text, u'\n'), - (Token.Keyword, u'முடி'), - (Token.Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) + tokens = [ + (Token.Operator, u'@'), + (Token.Punctuation, u'('), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'0'), + (Token.Text, u' '), + (Token.Operator, u'>'), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'3'), + (Token.Text, u' '), + (Token.Punctuation, u')'), + (Token.Text, u' '), + (Token.Keyword, u'ஆனால்'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'பதிப்பி'), + (Token.Text, u' '), + (Token.Literal.String, u'"wont print"'), + (Token.Text, u'\n'), + (Token.Keyword, u'முடி'), + (Token.Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens + - def testFunction(self): - fragment = u"""# (C) முத்தையா அண்ணாமலை 2013, 2015 +def test_function(lexer): + fragment = u"""# 
(C) முத்தையா அண்ணாமலை 2013, 2015 நிரல்பாகம் gcd ( x, y ) - மு = max(x,y) - q = min(x,y) +மு = max(x,y) + q = min(x,y) - @( q == 0 ) ஆனால் - பின்கொடு மு - முடி - பின்கொடு gcd( மு - q , q ) +@( q == 0 ) ஆனால் + பின்கொடு மு +முடி +பின்கொடு gcd( மு - q , q ) முடி\n""" - tokens = [ - (Token.Comment.Single, - u'# (C) \u0bae\u0bc1\u0ba4\u0bcd\u0ba4\u0bc8\u0baf\u0bbe \u0b85' - u'\u0ba3\u0bcd\u0ba3\u0bbe\u0bae\u0bb2\u0bc8 2013, 2015\n'), - (Token.Keyword,u'நிரல்பாகம்'), - (Token.Text, u' '), - (Token.Name, u'gcd'), - (Token.Text, u' '), - (Token.Punctuation, u'('), - (Token.Text, u' '), - (Token.Name, u'x'), - (Token.Operator, u','), - (Token.Text, u' '), - (Token.Name, u'y'), - (Token.Text, u' '), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Name, u'\u0bae\u0bc1'), - (Token.Text, u' '), - (Token.Operator, u'='), - (Token.Text, u' '), - (Token.Name.Builtin, u'max'), - (Token.Punctuation, u'('), - (Token.Name, u'x'), - (Token.Operator, u','), - (Token.Name, u'y'), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Name, u'q'), - (Token.Text, u' '), - (Token.Operator, u'='), - (Token.Text, u' '), - (Token.Name.Builtin, u'min'), - (Token.Punctuation, u'('), - (Token.Name, u'x'), - (Token.Operator, u','), - (Token.Name, u'y'), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Operator, u'@'), - (Token.Punctuation, u'('), - (Token.Text, u' '), - (Token.Name, u'q'), - (Token.Text, u' '), - (Token.Operator, u'=='), - (Token.Text, u' '), - (Token.Literal.Number.Integer, u'0'), - (Token.Text, u' '), - (Token.Punctuation, u')'), - (Token.Text, u' '), - (Token.Keyword, u'ஆனால்'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'பின்கொடு'), - (Token.Text, u' '), - (Token.Name, u'\u0bae\u0bc1'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'முடி'), - (Token.Text, u'\n'), - (Token.Text, u' '), - (Token.Keyword, u'\u0baa\u0bbf\u0ba9\u0bcd\u0b95\u0bca\u0b9f\u0bc1'), - (Token.Text, u' '), - (Token.Name, u'gcd'), - (Token.Punctuation, u'('), - (Token.Text, u' '), - (Token.Name, u'\u0bae\u0bc1'), - (Token.Text, u' '), - (Token.Operator, u'-'), - (Token.Text, u' '), - (Token.Name, u'q'), - (Token.Text, u' '), - (Token.Operator, u','), - (Token.Text, u' '), - (Token.Name, u'q'), - (Token.Text, u' '), - (Token.Punctuation, u')'), - (Token.Text, u'\n'), - (Token.Keyword, u'முடி'), #u'\u0bae\u0bc1\u0b9f\u0bbf'), - (Token.Text, u'\n') - ] - self.assertEqual(tokens, list(self.lexer.get_tokens(fragment))) - -if __name__ == "__main__": - unittest.main() + tokens = [ + (Token.Comment.Single, + u'# (C) \u0bae\u0bc1\u0ba4\u0bcd\u0ba4\u0bc8\u0baf\u0bbe \u0b85' + u'\u0ba3\u0bcd\u0ba3\u0bbe\u0bae\u0bb2\u0bc8 2013, 2015\n'), + (Token.Keyword, u'நிரல்பாகம்'), + (Token.Text, u' '), + (Token.Name, u'gcd'), + (Token.Text, u' '), + (Token.Punctuation, u'('), + (Token.Text, u' '), + (Token.Name, u'x'), + (Token.Operator, u','), + (Token.Text, u' '), + (Token.Name, u'y'), + (Token.Text, u' '), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Name, u'\u0bae\u0bc1'), + (Token.Text, u' '), + (Token.Operator, u'='), + (Token.Text, u' '), + (Token.Name.Builtin, u'max'), + (Token.Punctuation, u'('), + (Token.Name, u'x'), + (Token.Operator, u','), + (Token.Name, u'y'), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Name, u'q'), + (Token.Text, u' '), + (Token.Operator, u'='), + (Token.Text, u' '), + (Token.Name.Builtin, u'min'), + 
(Token.Punctuation, u'('), + (Token.Name, u'x'), + (Token.Operator, u','), + (Token.Name, u'y'), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Text, u'\n'), + (Token.Operator, u'@'), + (Token.Punctuation, u'('), + (Token.Text, u' '), + (Token.Name, u'q'), + (Token.Text, u' '), + (Token.Operator, u'=='), + (Token.Text, u' '), + (Token.Literal.Number.Integer, u'0'), + (Token.Text, u' '), + (Token.Punctuation, u')'), + (Token.Text, u' '), + (Token.Keyword, u'ஆனால்'), + (Token.Text, u'\n'), + (Token.Text, u' '), + (Token.Keyword, u'பின்கொடு'), + (Token.Text, u' '), + (Token.Name, u'\u0bae\u0bc1'), + (Token.Text, u'\n'), + (Token.Keyword, u'முடி'), + (Token.Text, u'\n'), + (Token.Keyword, u'\u0baa\u0bbf\u0ba9\u0bcd\u0b95\u0bca\u0b9f\u0bc1'), + (Token.Text, u' '), + (Token.Name, u'gcd'), + (Token.Punctuation, u'('), + (Token.Text, u' '), + (Token.Name, u'\u0bae\u0bc1'), + (Token.Text, u' '), + (Token.Operator, u'-'), + (Token.Text, u' '), + (Token.Name, u'q'), + (Token.Text, u' '), + (Token.Operator, u','), + (Token.Text, u' '), + (Token.Name, u'q'), + (Token.Text, u' '), + (Token.Punctuation, u')'), + (Token.Text, u'\n'), + (Token.Keyword, u'முடி'), # u'\u0bae\u0bc1\u0b9f\u0bbf'), + (Token.Text, u'\n') + ] + assert list(lexer.get_tokens(fragment)) == tokens diff --git a/tests/test_html_formatter.py b/tests/test_html_formatter.py index 055cf260..95247398 100644 --- a/tests/test_html_formatter.py +++ b/tests/test_html_formatter.py @@ -3,7 +3,7 @@ Pygments HTML formatter tests ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS. + :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ @@ -12,10 +12,11 @@ from __future__ import print_function import io import os import re -import unittest import tempfile from os import path +from pytest import raises + from pygments.util import StringIO from pygments.lexers import PythonLexer from pygments.formatters import HtmlFormatter, NullFormatter @@ -28,173 +29,182 @@ with io.open(TESTFILE, encoding='utf-8') as fp: tokensource = list(PythonLexer().get_tokens(fp.read())) -class HtmlFormatterTest(unittest.TestCase): - def test_correct_output(self): - hfmt = HtmlFormatter(nowrap=True) - houtfile = StringIO() - hfmt.format(tokensource, houtfile) - - nfmt = NullFormatter() - noutfile = StringIO() - nfmt.format(tokensource, noutfile) - - stripped_html = re.sub('<.*?>', '', houtfile.getvalue()) - escaped_text = escape_html(noutfile.getvalue()) - self.assertEqual(stripped_html, escaped_text) - - def test_external_css(self): - # test correct behavior - # CSS should be in /tmp directory - fmt1 = HtmlFormatter(full=True, cssfile='fmt1.css', outencoding='utf-8') - # CSS should be in TESTDIR (TESTDIR is absolute) - fmt2 = HtmlFormatter(full=True, cssfile=path.join(TESTDIR, 'fmt2.css'), - outencoding='utf-8') - tfile = tempfile.NamedTemporaryFile(suffix='.html') - fmt1.format(tokensource, tfile) - try: - fmt2.format(tokensource, tfile) - self.assertTrue(path.isfile(path.join(TESTDIR, 'fmt2.css'))) - except IOError: - # test directory not writable - pass - tfile.close() - - self.assertTrue(path.isfile(path.join(path.dirname(tfile.name), - 'fmt1.css'))) - os.unlink(path.join(path.dirname(tfile.name), 'fmt1.css')) - try: - os.unlink(path.join(TESTDIR, 'fmt2.css')) - except OSError: - pass - - def test_all_options(self): - def check(optdict): - outfile = StringIO() - fmt = HtmlFormatter(**optdict) - fmt.format(tokensource, outfile) - - for optdict in [ - 
dict(nowrap=True), - dict(linenos=True, full=True), - dict(linenos=True, linespans='L'), - dict(hl_lines=[1, 5, 10, 'xxx']), - dict(hl_lines=[1, 5, 10], noclasses=True), - ]: - check(optdict) - - for linenos in [False, 'table', 'inline']: - for noclasses in [False, True]: - for linenospecial in [0, 5]: - for anchorlinenos in [False, True]: - optdict = dict( - linenos=linenos, - noclasses=noclasses, - linenospecial=linenospecial, - anchorlinenos=anchorlinenos, - ) - check(optdict) - - def test_linenos(self): - optdict = dict(linenos=True) +def test_correct_output(): + hfmt = HtmlFormatter(nowrap=True) + houtfile = StringIO() + hfmt.format(tokensource, houtfile) + + nfmt = NullFormatter() + noutfile = StringIO() + nfmt.format(tokensource, noutfile) + + stripped_html = re.sub('<.*?>', '', houtfile.getvalue()) + escaped_text = escape_html(noutfile.getvalue()) + assert stripped_html == escaped_text + + +def test_external_css(): + # test correct behavior + # CSS should be in /tmp directory + fmt1 = HtmlFormatter(full=True, cssfile='fmt1.css', outencoding='utf-8') + # CSS should be in TESTDIR (TESTDIR is absolute) + fmt2 = HtmlFormatter(full=True, cssfile=path.join(TESTDIR, 'fmt2.css'), + outencoding='utf-8') + tfile = tempfile.NamedTemporaryFile(suffix='.html') + fmt1.format(tokensource, tfile) + try: + fmt2.format(tokensource, tfile) + assert path.isfile(path.join(TESTDIR, 'fmt2.css')) + except IOError: + # test directory not writable + pass + tfile.close() + + assert path.isfile(path.join(path.dirname(tfile.name), 'fmt1.css')) + os.unlink(path.join(path.dirname(tfile.name), 'fmt1.css')) + try: + os.unlink(path.join(TESTDIR, 'fmt2.css')) + except OSError: + pass + + +def test_all_options(): + def check(optdict): outfile = StringIO() fmt = HtmlFormatter(**optdict) fmt.format(tokensource, outfile) - html = outfile.getvalue() - self.assertTrue(re.search(r"
<pre>\s+1\s+2\s+3", html))
 
-    def test_linenos_with_startnum(self):
-        optdict = dict(linenos=True, linenostart=5)
+    for optdict in [
+        dict(nowrap=True),
+        dict(linenos=True, full=True),
+        dict(linenos=True, linespans='L'),
+        dict(hl_lines=[1, 5, 10, 'xxx']),
+        dict(hl_lines=[1, 5, 10], noclasses=True),
+    ]:
+        check(optdict)
+
+    for linenos in [False, 'table', 'inline']:
+        for noclasses in [False, True]:
+            for linenospecial in [0, 5]:
+                for anchorlinenos in [False, True]:
+                    optdict = dict(
+                        linenos=linenos,
+                        noclasses=noclasses,
+                        linenospecial=linenospecial,
+                        anchorlinenos=anchorlinenos,
+                    )
+                    check(optdict)
+
+
+def test_linenos():
+    optdict = dict(linenos=True)
+    outfile = StringIO()
+    fmt = HtmlFormatter(**optdict)
+    fmt.format(tokensource, outfile)
+    html = outfile.getvalue()
+    assert re.search(r"
\s+1\s+2\s+3", html)
+
+
+def test_linenos_with_startnum():
+    optdict = dict(linenos=True, linenostart=5)
+    outfile = StringIO()
+    fmt = HtmlFormatter(**optdict)
+    fmt.format(tokensource, outfile)
+    html = outfile.getvalue()
+    assert re.search(r"
\s+5\s+6\s+7", html)
+
+
+def test_lineanchors():
+    optdict = dict(lineanchors="foo")
+    outfile = StringIO()
+    fmt = HtmlFormatter(**optdict)
+    fmt.format(tokensource, outfile)
+    html = outfile.getvalue()
+    assert re.search("
", html)
+
+
+def test_lineanchors_with_startnum():
+    optdict = dict(lineanchors="foo", linenostart=5)
+    outfile = StringIO()
+    fmt = HtmlFormatter(**optdict)
+    fmt.format(tokensource, outfile)
+    html = outfile.getvalue()
+    assert re.search("
", html)
+
+
+def test_valid_output():
+    # test all available wrappers
+    fmt = HtmlFormatter(full=True, linenos=True, noclasses=True,
+                        outencoding='utf-8')
+
+    handle, pathname = tempfile.mkstemp('.html')
+    with os.fdopen(handle, 'w+b') as tfile:
+        fmt.format(tokensource, tfile)
+    catname = os.path.join(TESTDIR, 'dtds', 'HTML4.soc')
+    try:
+        import subprocess
+        po = subprocess.Popen(['nsgmls', '-s', '-c', catname, pathname],
+                              stdout=subprocess.PIPE)
+        ret = po.wait()
+        output = po.stdout.read()
+        po.stdout.close()
+    except OSError:
+        # nsgmls not available
+        pass
+    else:
+        if ret:
+            print(output)
+        assert not ret, 'nsgmls run reported errors'
+
+    os.unlink(pathname)
+
+
+def test_get_style_defs():
+    fmt = HtmlFormatter()
+    sd = fmt.get_style_defs()
+    assert sd.startswith('.')
+
+    fmt = HtmlFormatter(cssclass='foo')
+    sd = fmt.get_style_defs()
+    assert sd.startswith('.foo')
+    sd = fmt.get_style_defs('.bar')
+    assert sd.startswith('.bar')
+    sd = fmt.get_style_defs(['.bar', '.baz'])
+    fl = sd.splitlines()[0]
+    assert '.bar' in fl and '.baz' in fl
+
+
+def test_unicode_options():
+    fmt = HtmlFormatter(title=u'Föö',
+                        cssclass=u'bär',
+                        cssstyles=u'div:before { content: \'bäz\' }',
+                        encoding='utf-8')
+    handle, pathname = tempfile.mkstemp('.html')
+    with os.fdopen(handle, 'w+b') as tfile:
+        fmt.format(tokensource, tfile)
+
+
+def test_ctags():
+    try:
+        import ctags
+    except ImportError:
+        # we can't check without the ctags module, but at least check the exception
+        assert raises(RuntimeError, HtmlFormatter, tagsfile='support/tags')
+    else:
+        # this tagfile says that test_ctags() is on line 165, even if it isn't
+        # anymore in the actual source
+        fmt = HtmlFormatter(tagsfile='support/tags', lineanchors='L',
+                            tagurlformat='%(fname)s%(fext)s')
         outfile = StringIO()
-        fmt = HtmlFormatter(**optdict)
         fmt.format(tokensource, outfile)
-        html = outfile.getvalue()
-        self.assertTrue(re.search(r"
\s+5\s+6\s+7", html))
+        assert 'test_ctags' in outfile.getvalue()
 
-    def test_lineanchors(self):
-        optdict = dict(lineanchors="foo")
-        outfile = StringIO()
-        fmt = HtmlFormatter(**optdict)
-        fmt.format(tokensource, outfile)
-        html = outfile.getvalue()
-        self.assertTrue(re.search("
", html))
 
-    def test_lineanchors_with_startnum(self):
-        optdict = dict(lineanchors="foo", linenostart=5)
-        outfile = StringIO()
-        fmt = HtmlFormatter(**optdict)
-        fmt.format(tokensource, outfile)
-        html = outfile.getvalue()
-        self.assertTrue(re.search("
", html))
-
-    def test_valid_output(self):
-        # test all available wrappers
-        fmt = HtmlFormatter(full=True, linenos=True, noclasses=True,
-                            outencoding='utf-8')
-
-        handle, pathname = tempfile.mkstemp('.html')
-        with os.fdopen(handle, 'w+b') as tfile:
-            fmt.format(tokensource, tfile)
-        catname = os.path.join(TESTDIR, 'dtds', 'HTML4.soc')
-        try:
-            import subprocess
-            po = subprocess.Popen(['nsgmls', '-s', '-c', catname, pathname],
-                                  stdout=subprocess.PIPE)
-            ret = po.wait()
-            output = po.stdout.read()
-            po.stdout.close()
-        except OSError:
-            # nsgmls not available
-            pass
-        else:
-            if ret:
-                print(output)
-            self.assertFalse(ret, 'nsgmls run reported errors')
-
-        os.unlink(pathname)
-
-    def test_get_style_defs(self):
-        fmt = HtmlFormatter()
-        sd = fmt.get_style_defs()
-        self.assertTrue(sd.startswith('.'))
-
-        fmt = HtmlFormatter(cssclass='foo')
-        sd = fmt.get_style_defs()
-        self.assertTrue(sd.startswith('.foo'))
-        sd = fmt.get_style_defs('.bar')
-        self.assertTrue(sd.startswith('.bar'))
-        sd = fmt.get_style_defs(['.bar', '.baz'])
-        fl = sd.splitlines()[0]
-        self.assertTrue('.bar' in fl and '.baz' in fl)
-
-    def test_unicode_options(self):
-        fmt = HtmlFormatter(title=u'Föö',
-                            cssclass=u'bär',
-                            cssstyles=u'div:before { content: \'bäz\' }',
-                            encoding='utf-8')
-        handle, pathname = tempfile.mkstemp('.html')
-        with os.fdopen(handle, 'w+b') as tfile:
-            fmt.format(tokensource, tfile)
-
-    def test_ctags(self):
-        try:
-            import ctags
-        except ImportError:
-            # we can't check without the ctags module, but at least check the exception
-            self.assertRaises(RuntimeError, HtmlFormatter, tagsfile='support/tags')
-        else:
-            # this tagfile says that test_ctags() is on line 165, even if it isn't
-            # anymore in the actual source
-            fmt = HtmlFormatter(tagsfile='support/tags', lineanchors='L',
-                                tagurlformat='%(fname)s%(fext)s')
-            outfile = StringIO()
-            fmt.format(tokensource, outfile)
-            self.assertTrue('test_ctags'
-                            in outfile.getvalue())
-
-    def test_filename(self):
-        optdict = dict(filename="test.py")
-        outfile = StringIO()
-        fmt = HtmlFormatter(**optdict)
-        fmt.format(tokensource, outfile)
-        html = outfile.getvalue()
-        self.assertTrue(re.search("test.py
", html))
+def test_filename():
+    optdict = dict(filename="test.py")
+    outfile = StringIO()
+    fmt = HtmlFormatter(**optdict)
+    fmt.format(tokensource, outfile)
+    html = outfile.getvalue()
+    assert re.search("test.py
", html)
diff --git a/tests/test_inherit.py b/tests/test_inherit.py
index 5da57dd9..03527724 100644
--- a/tests/test_inherit.py
+++ b/tests/test_inherit.py
@@ -3,41 +3,14 @@
     Tests for inheritance in RegexLexer
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
-
 from pygments.lexer import RegexLexer, inherit
 from pygments.token import Text
 
 
-class InheritTest(unittest.TestCase):
-    def test_single_inheritance_position(self):
-        t = Two()
-        pats = [x[0].__self__.pattern for x in t._tokens['root']]
-        self.assertEqual(['x', 'a', 'b', 'y'], pats)
-    def test_multi_inheritance_beginning(self):
-        t = Beginning()
-        pats = [x[0].__self__.pattern for x in t._tokens['root']]
-        self.assertEqual(['x', 'a', 'b', 'y', 'm'], pats)
-    def test_multi_inheritance_end(self):
-        t = End()
-        pats = [x[0].__self__.pattern for x in t._tokens['root']]
-        self.assertEqual(['m', 'x', 'a', 'b', 'y'], pats)
-
-    def test_multi_inheritance_position(self):
-        t = Three()
-        pats = [x[0].__self__.pattern for x in t._tokens['root']]
-        self.assertEqual(['i', 'x', 'a', 'b', 'y', 'j'], pats)
-
-    def test_single_inheritance_with_skip(self):
-        t = Skipped()
-        pats = [x[0].__self__.pattern for x in t._tokens['root']]
-        self.assertEqual(['x', 'a', 'b', 'y'], pats)
-
-
 class One(RegexLexer):
     tokens = {
         'root': [
@@ -46,6 +19,7 @@ class One(RegexLexer):
         ],
     }
 
+
 class Two(One):
     tokens = {
         'root': [
@@ -55,6 +29,7 @@ class Two(One):
         ],
     }
 
+
 class Three(Two):
     tokens = {
         'root': [
@@ -64,6 +39,7 @@ class Three(Two):
         ],
     }
 
+
 class Beginning(Two):
     tokens = {
         'root': [
@@ -72,6 +48,7 @@ class Beginning(Two):
         ],
     }
 
+
 class End(Two):
     tokens = {
         'root': [
@@ -80,9 +57,11 @@ class End(Two):
         ],
     }
 
+
 class Empty(One):
     tokens = {}
 
+
 class Skipped(Empty):
     tokens = {
         'root': [
@@ -92,3 +71,32 @@ class Skipped(Empty):
         ],
     }
 
+
+def test_single_inheritance_position():
+    t = Two()
+    pats = [x[0].__self__.pattern for x in t._tokens['root']]
+    assert ['x', 'a', 'b', 'y'] == pats
+
+
+def test_multi_inheritance_beginning():
+    t = Beginning()
+    pats = [x[0].__self__.pattern for x in t._tokens['root']]
+    assert ['x', 'a', 'b', 'y', 'm'] == pats
+
+
+def test_multi_inheritance_end():
+    t = End()
+    pats = [x[0].__self__.pattern for x in t._tokens['root']]
+    assert ['m', 'x', 'a', 'b', 'y'] == pats
+
+
+def test_multi_inheritance_position():
+    t = Three()
+    pats = [x[0].__self__.pattern for x in t._tokens['root']]
+    assert ['i', 'x', 'a', 'b', 'y', 'j'] == pats
+
+
+def test_single_inheritance_with_skip():
+    t = Skipped()
+    pats = [x[0].__self__.pattern for x in t._tokens['root']]
+    assert ['x', 'a', 'b', 'y'] == pats
diff --git a/tests/test_irc_formatter.py b/tests/test_irc_formatter.py
index 4d56aa90..046a0d19 100644
--- a/tests/test_irc_formatter.py
+++ b/tests/test_irc_formatter.py
@@ -3,14 +3,12 @@
     Pygments IRC formatter tests
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
 from __future__ import print_function
 
-import unittest
-
 from pygments.util import StringIO
 from pygments.lexers import PythonLexer
 from pygments.formatters import IRCFormatter
@@ -18,10 +16,9 @@ from pygments.formatters import IRCFormatter
 tokensource = list(PythonLexer().get_tokens("lambda x: 123"))
 
 
-class IRCFormatterTest(unittest.TestCase):
-    def test_correct_output(self):
-        hfmt = IRCFormatter()
-        houtfile = StringIO()
-        hfmt.format(tokensource, houtfile)
+def test_correct_output():
+    hfmt = IRCFormatter()
+    houtfile = StringIO()
+    hfmt.format(tokensource, houtfile)
 
-        self.assertEqual(u'\x0302lambda\x03 x: \x0302123\x03\n', houtfile.getvalue())
+    assert u'\x0302lambda\x03 x: \x0302123\x03\n' == houtfile.getvalue()
diff --git a/tests/test_java.py b/tests/test_java.py
index 6e5e8992..40a1ec1b 100644
--- a/tests/test_java.py
+++ b/tests/test_java.py
@@ -3,76 +3,76 @@
     Basic JavaLexer Test
     ~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.token import Text, Name, Operator, Keyword, Number
 from pygments.lexers import JavaLexer
 
 
-class JavaTest(unittest.TestCase):
+@pytest.fixture(scope='module')
+def lexer():
+    yield JavaLexer()
 
-    def setUp(self):
-        self.lexer = JavaLexer()
-        self.maxDiff = None
 
-    def testEnhancedFor(self):
-        fragment = u'label:\nfor(String var2: var1) {}\n'
-        tokens = [
-            (Name.Label, u'label:'),
-            (Text, u'\n'),
-            (Keyword, u'for'),
-            (Operator, u'('),
-            (Name, u'String'),
-            (Text, u' '),
-            (Name, u'var2'),
-            (Operator, u':'),
-            (Text, u' '),
-            (Name, u'var1'),
-            (Operator, u')'),
-            (Text, u' '),
-            (Operator, u'{'),
-            (Operator, u'}'),
-            (Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+def test_enhanced_for(lexer):
+    fragment = u'label:\nfor(String var2: var1) {}\n'
+    tokens = [
+        (Name.Label, u'label:'),
+        (Text, u'\n'),
+        (Keyword, u'for'),
+        (Operator, u'('),
+        (Name, u'String'),
+        (Text, u' '),
+        (Name, u'var2'),
+        (Operator, u':'),
+        (Text, u' '),
+        (Name, u'var1'),
+        (Operator, u')'),
+        (Text, u' '),
+        (Operator, u'{'),
+        (Operator, u'}'),
+        (Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
 
-    def testNumericLiterals(self):
-        fragment = '0 5L 9__542_72l 0xbEEf 0X9_A 0_35 01 0b0___101_0'
-        fragment += ' 0. .7_17F 3e-1_3d 1f 6_01.9e+3 0x.1Fp3 0XEP8D\n'
-        tokens = [
-            (Number.Integer, '0'),
-            (Text, ' '),
-            (Number.Integer, '5L'),
-            (Text, ' '),
-            (Number.Integer, '9__542_72l'),
-            (Text, ' '),
-            (Number.Hex, '0xbEEf'),
-            (Text, ' '),
-            (Number.Hex, '0X9_A'),
-            (Text, ' '),
-            (Number.Oct, '0_35'),
-            (Text, ' '),
-            (Number.Oct, '01'),
-            (Text, ' '),
-            (Number.Bin, '0b0___101_0'),
-            (Text, ' '),
-            (Number.Float, '0.'),
-            (Text, ' '),
-            (Number.Float, '.7_17F'),
-            (Text, ' '),
-            (Number.Float, '3e-1_3d'),
-            (Text, ' '),
-            (Number.Float, '1f'),
-            (Text, ' '),
-            (Number.Float, '6_01.9e+3'),
-            (Text, ' '),
-            (Number.Float, '0x.1Fp3'),
-            (Text, ' '),
-            (Number.Float, '0XEP8D'),
-            (Text, '\n')
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+def test_numeric_literals(lexer):
+    fragment = '0 5L 9__542_72l 0xbEEf 0X9_A 0_35 01 0b0___101_0'
+    fragment += ' 0. .7_17F 3e-1_3d 1f 6_01.9e+3 0x.1Fp3 0XEP8D\n'
+    tokens = [
+        (Number.Integer, '0'),
+        (Text, ' '),
+        (Number.Integer, '5L'),
+        (Text, ' '),
+        (Number.Integer, '9__542_72l'),
+        (Text, ' '),
+        (Number.Hex, '0xbEEf'),
+        (Text, ' '),
+        (Number.Hex, '0X9_A'),
+        (Text, ' '),
+        (Number.Oct, '0_35'),
+        (Text, ' '),
+        (Number.Oct, '01'),
+        (Text, ' '),
+        (Number.Bin, '0b0___101_0'),
+        (Text, ' '),
+        (Number.Float, '0.'),
+        (Text, ' '),
+        (Number.Float, '.7_17F'),
+        (Text, ' '),
+        (Number.Float, '3e-1_3d'),
+        (Text, ' '),
+        (Number.Float, '1f'),
+        (Text, ' '),
+        (Number.Float, '6_01.9e+3'),
+        (Text, ' '),
+        (Number.Float, '0x.1Fp3'),
+        (Text, ' '),
+        (Number.Float, '0XEP8D'),
+        (Text, '\n')
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_javascript.py b/tests/test_javascript.py
index 040d776c..25e06fdc 100644
--- a/tests/test_javascript.py
+++ b/tests/test_javascript.py
@@ -3,12 +3,10 @@
     Javascript tests
     ~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
-
 import pytest
 
 from pygments.lexers import CoffeeScriptLexer
@@ -39,11 +37,15 @@ COFFEE_SLASH_GOLDEN = [
 ]
 
 
+@pytest.fixture(scope='module')
+def lexer():
+    yield CoffeeScriptLexer()
+
+
 @pytest.mark.parametrize('golden', COFFEE_SLASH_GOLDEN)
-def test_coffee_slashes(golden):
+def test_coffee_slashes(lexer, golden):
     input_str, slashes_are_regex_here = golden
-    lex = CoffeeScriptLexer()
-    output = list(lex.get_tokens(input_str))
+    output = list(lexer.get_tokens(input_str))
     print(output)
     for t, s in output:
         if '/' in s:
@@ -51,36 +53,33 @@ def test_coffee_slashes(golden):
             assert is_regex == slashes_are_regex_here, (t, s)
 
 
-class CoffeeTest(unittest.TestCase):
-    def setUp(self):
-        self.lexer = CoffeeScriptLexer()
+def test_mixed_slashes(lexer):
+    fragment = u'a?/foo/:1/2;\n'
+    tokens = [
+        (Token.Name.Other, u'a'),
+        (Token.Operator, u'?'),
+        (Token.Literal.String.Regex, u'/foo/'),
+        (Token.Operator, u':'),
+        (Token.Literal.Number.Integer, u'1'),
+        (Token.Operator, u'/'),
+        (Token.Literal.Number.Integer, u'2'),
+        (Token.Punctuation, u';'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
 
-    def testMixedSlashes(self):
-        fragment = u'a?/foo/:1/2;\n'
-        tokens = [
-            (Token.Name.Other, u'a'),
-            (Token.Operator, u'?'),
-            (Token.Literal.String.Regex, u'/foo/'),
-            (Token.Operator, u':'),
-            (Token.Literal.Number.Integer, u'1'),
-            (Token.Operator, u'/'),
-            (Token.Literal.Number.Integer, u'2'),
-            (Token.Punctuation, u';'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
 
-    def testBewareInfiniteLoop(self):
-        # This demonstrates the case that "This isn't really guarding" comment
-        # refers to.
-        fragment = '/a/x;\n'
-        tokens = [
-            (Token.Text, ''),
-            (Token.Operator, '/'),
-            (Token.Name.Other, 'a'),
-            (Token.Operator, '/'),
-            (Token.Name.Other, 'x'),
-            (Token.Punctuation, ';'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+def test_beware_infinite_loop(lexer):
+    # This demonstrates the case that "This isn't really guarding" comment
+    # refers to.
+    fragment = '/a/x;\n'
+    tokens = [
+        (Token.Text, ''),
+        (Token.Operator, '/'),
+        (Token.Name.Other, 'a'),
+        (Token.Operator, '/'),
+        (Token.Name.Other, 'x'),
+        (Token.Punctuation, ';'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_julia.py b/tests/test_julia.py
index ed46f27e..3f115931 100644
--- a/tests/test_julia.py
+++ b/tests/test_julia.py
@@ -3,56 +3,57 @@
     Julia Tests
     ~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
-from pygments.lexers import JuliaLexer
 from pygments.token import Token
+from pygments.lexers import JuliaLexer
+
 
+@pytest.fixture(scope='module')
+def lexer():
+    yield JuliaLexer()
 
-class JuliaTests(unittest.TestCase):
-    def setUp(self):
-        self.lexer = JuliaLexer()
 
-    def test_unicode(self):
-        """
-        Test that unicode character, √, in an expression is recognized
-        """
-        fragment = u's = \u221a((1/n) * sum(count .^ 2) - mu .^2)\n'
-        tokens = [
-            (Token.Name, u's'),
-            (Token.Text, u' '),
-            (Token.Operator, u'='),
-            (Token.Text, u' '),
-            (Token.Operator, u'\u221a'),
-            (Token.Punctuation, u'('),
-            (Token.Punctuation, u'('),
-            (Token.Literal.Number.Integer, u'1'),
-            (Token.Operator, u'/'),
-            (Token.Name, u'n'),
-            (Token.Punctuation, u')'),
-            (Token.Text, u' '),
-            (Token.Operator, u'*'),
-            (Token.Text, u' '),
-            (Token.Name, u'sum'),
-            (Token.Punctuation, u'('),
-            (Token.Name, u'count'),
-            (Token.Text, u' '),
-            (Token.Operator, u'.^'),
-            (Token.Text, u' '),
-            (Token.Literal.Number.Integer, u'2'),
-            (Token.Punctuation, u')'),
-            (Token.Text, u' '),
-            (Token.Operator, u'-'),
-            (Token.Text, u' '),
-            (Token.Name, u'mu'),
-            (Token.Text, u' '),
-            (Token.Operator, u'.^'),
-            (Token.Literal.Number.Integer, u'2'),
-            (Token.Punctuation, u')'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+def test_unicode(lexer):
+    """
+    Test that the unicode character √ in an expression is recognized.
+    """
+    fragment = u's = \u221a((1/n) * sum(count .^ 2) - mu .^2)\n'
+    tokens = [
+        (Token.Name, u's'),
+        (Token.Text, u' '),
+        (Token.Operator, u'='),
+        (Token.Text, u' '),
+        (Token.Operator, u'\u221a'),
+        (Token.Punctuation, u'('),
+        (Token.Punctuation, u'('),
+        (Token.Literal.Number.Integer, u'1'),
+        (Token.Operator, u'/'),
+        (Token.Name, u'n'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u' '),
+        (Token.Operator, u'*'),
+        (Token.Text, u' '),
+        (Token.Name, u'sum'),
+        (Token.Punctuation, u'('),
+        (Token.Name, u'count'),
+        (Token.Text, u' '),
+        (Token.Operator, u'.^'),
+        (Token.Text, u' '),
+        (Token.Literal.Number.Integer, u'2'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u' '),
+        (Token.Operator, u'-'),
+        (Token.Text, u' '),
+        (Token.Name, u'mu'),
+        (Token.Text, u' '),
+        (Token.Operator, u'.^'),
+        (Token.Literal.Number.Integer, u'2'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_kotlin.py b/tests/test_kotlin.py
index bc27908a..1fdcd934 100644
--- a/tests/test_kotlin.py
+++ b/tests/test_kotlin.py
@@ -7,126 +7,127 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.token import Text, Name, Keyword, Punctuation, String
 from pygments.lexers import KotlinLexer
 
 
-class KotlinTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = KotlinLexer()
-        self.maxDiff = None
-
-    def testCanCopeWithBackTickNamesInFunctions(self):
-        fragment = u'fun `wo bble`'
-        tokens = [
-            (Keyword, u'fun'),
-            (Text, u' '),
-            (Name.Function, u'`wo bble`'),
-            (Text, u'\n')
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testCanCopeWithCommasAndDashesInBackTickNames(self):
-        fragment = u'fun `wo,-bble`'
-        tokens = [
-            (Keyword, u'fun'),
-            (Text, u' '),
-            (Name.Function, u'`wo,-bble`'),
-            (Text, u'\n')
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testCanCopeWithDestructuring(self):
-        fragment = u'val (a, b) = '
-        tokens = [
-            (Keyword, u'val'),
-            (Text, u' '),
-            (Punctuation, u'('),
-            (Name.Property, u'a'),
-            (Punctuation, u','),
-            (Text, u' '),
-            (Name.Property, u'b'),
-            (Punctuation, u')'),
-            (Text, u' '),
-            (Punctuation, u'='),
-            (Text, u' '),
-            (Text, u'\n')
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testCanCopeGenericsInDestructuring(self):
-        fragment = u'val (a: List<Something>, b: Set<Wobble>) ='
-        tokens = [
-            (Keyword, u'val'),
-            (Text, u' '),
-            (Punctuation, u'('),
-            (Name.Property, u'a'),
-            (Punctuation, u':'),
-            (Text, u' '),
-            (Name.Property, u'List'),
-            (Punctuation, u'<'),
-            (Name, u'Something'),
-            (Punctuation, u'>'),
-            (Punctuation, u','),
-            (Text, u' '),
-            (Name.Property, u'b'),
-            (Punctuation, u':'),
-            (Text, u' '),
-            (Name.Property, u'Set'),
-            (Punctuation, u'<'),
-            (Name, u'Wobble'),
-            (Punctuation, u'>'),
-            (Punctuation, u')'),
-            (Text, u' '),
-            (Punctuation, u'='),
-            (Text, u'\n')
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testCanCopeWithGenerics(self):
-        fragment = u'inline fun <reified T : ContractState> VaultService.queryBy(): Vault.Page<T> {'
-        tokens = [
-            (Keyword, u'inline fun'),
-            (Text, u' '),
-            (Punctuation, u'<'),
-            (Keyword, u'reified'),
-            (Text, u' '),
-            (Name, u'T'),
-            (Text, u' '),
-            (Punctuation, u':'),
-            (Text, u' '),
-            (Name, u'ContractState'),
-            (Punctuation, u'>'),
-            (Text, u' '),
-            (Name.Class, u'VaultService'),
-            (Punctuation, u'.'),
-            (Name.Function, u'queryBy'),
-            (Punctuation, u'('),
-            (Punctuation, u')'),
-            (Punctuation, u':'),
-            (Text, u' '),
-            (Name, u'Vault'),
-            (Punctuation, u'.'),
-            (Name, u'Page'),
-            (Punctuation, u'<'),
-            (Name, u'T'),
-            (Punctuation, u'>'),
-            (Text, u' '),
-            (Punctuation, u'{'),
-            (Text, u'\n')
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testShouldCopeWithMultilineComments(self):
-        fragment = u'"""\nthis\nis\na\ncomment"""'
-        tokens = [
-            (String, u'"""\nthis\nis\na\ncomment"""'),
-            (Text, u'\n')
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-if __name__ == '__main__':
-    unittest.main()
+@pytest.fixture(scope='module')
+def lexer():
+    yield KotlinLexer()
+
+
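+# The module-scoped fixture above is pytest's replacement for the old
+# unittest setUp(): a single KotlinLexer is built per module and handed to
+# every test that declares a ``lexer`` parameter.  A minimal sketch of the
+# pattern, using a hypothetical fragment:
+#
+#     def test_sketch(lexer):
+#         assert list(lexer.get_tokens(u'fun f()'))[0] == (Keyword, u'fun')
+
+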
+def test_can_cope_with_backtick_names_in_functions(lexer):
+    fragment = u'fun `wo bble`'
+    tokens = [
+        (Keyword, u'fun'),
+        (Text, u' '),
+        (Name.Function, u'`wo bble`'),
+        (Text, u'\n')
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_can_cope_with_commas_and_dashes_in_backtick_names(lexer):
+    fragment = u'fun `wo,-bble`'
+    tokens = [
+        (Keyword, u'fun'),
+        (Text, u' '),
+        (Name.Function, u'`wo,-bble`'),
+        (Text, u'\n')
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_can_cope_with_destructuring(lexer):
+    fragment = u'val (a, b) = '
+    tokens = [
+        (Keyword, u'val'),
+        (Text, u' '),
+        (Punctuation, u'('),
+        (Name.Property, u'a'),
+        (Punctuation, u','),
+        (Text, u' '),
+        (Name.Property, u'b'),
+        (Punctuation, u')'),
+        (Text, u' '),
+        (Punctuation, u'='),
+        (Text, u' '),
+        (Text, u'\n')
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_can_cope_generics_in_destructuring(lexer):
+    fragment = u'val (a: List<Something>, b: Set<Wobble>) ='
+    tokens = [
+        (Keyword, u'val'),
+        (Text, u' '),
+        (Punctuation, u'('),
+        (Name.Property, u'a'),
+        (Punctuation, u':'),
+        (Text, u' '),
+        (Name.Property, u'List'),
+        (Punctuation, u'<'),
+        (Name, u'Something'),
+        (Punctuation, u'>'),
+        (Punctuation, u','),
+        (Text, u' '),
+        (Name.Property, u'b'),
+        (Punctuation, u':'),
+        (Text, u' '),
+        (Name.Property, u'Set'),
+        (Punctuation, u'<'),
+        (Name, u'Wobble'),
+        (Punctuation, u'>'),
+        (Punctuation, u')'),
+        (Text, u' '),
+        (Punctuation, u'='),
+        (Text, u'\n')
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_can_cope_with_generics(lexer):
+    fragment = u'inline fun <reified T : ContractState> VaultService.queryBy(): Vault.Page<T> {'
+    tokens = [
+        (Keyword, u'inline fun'),
+        (Text, u' '),
+        (Punctuation, u'<'),
+        (Keyword, u'reified'),
+        (Text, u' '),
+        (Name, u'T'),
+        (Text, u' '),
+        (Punctuation, u':'),
+        (Text, u' '),
+        (Name, u'ContractState'),
+        (Punctuation, u'>'),
+        (Text, u' '),
+        (Name.Class, u'VaultService'),
+        (Punctuation, u'.'),
+        (Name.Function, u'queryBy'),
+        (Punctuation, u'('),
+        (Punctuation, u')'),
+        (Punctuation, u':'),
+        (Text, u' '),
+        (Name, u'Vault'),
+        (Punctuation, u'.'),
+        (Name, u'Page'),
+        (Punctuation, u'<'),
+        (Name, u'T'),
+        (Punctuation, u'>'),
+        (Text, u' '),
+        (Punctuation, u'{'),
+        (Text, u'\n')
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_should_cope_with_multiline_comments(lexer):
+    fragment = u'"""\nthis\nis\na\ncomment"""'
+    tokens = [
+        (String, u'"""\nthis\nis\na\ncomment"""'),
+        (Text, u'\n')
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_latex_formatter.py b/tests/test_latex_formatter.py
index 9e0dd77c..7ab0d7d0 100644
--- a/tests/test_latex_formatter.py
+++ b/tests/test_latex_formatter.py
@@ -3,14 +3,13 @@
     Pygments LaTeX formatter tests
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
 from __future__ import print_function
 
 import os
-import unittest
 import tempfile
 from os import path
 
@@ -23,34 +22,32 @@ TESTDIR = path.dirname(path.abspath(__file__))
 TESTFILE = path.join(TESTDIR, 'test_latex_formatter.py')
 
 
-class LatexFormatterTest(unittest.TestCase):
-
-    def test_valid_output(self):
-        with open(TESTFILE) as fp:
-            tokensource = list(PythonLexer().get_tokens(fp.read()))
-        fmt = LatexFormatter(full=True, encoding='latin1')
-
-        handle, pathname = tempfile.mkstemp('.tex')
-        # place all output files in /tmp too
-        old_wd = os.getcwd()
-        os.chdir(os.path.dirname(pathname))
-        tfile = os.fdopen(handle, 'wb')
-        fmt.format(tokensource, tfile)
-        tfile.close()
-        try:
-            import subprocess
-            po = subprocess.Popen(['latex', '-interaction=nonstopmode',
-                                   pathname], stdout=subprocess.PIPE)
-            ret = po.wait()
-            output = po.stdout.read()
-            po.stdout.close()
-        except OSError as e:
-            # latex not available
-            pytest.skip(str(e))
-        else:
-            if ret:
-                print(output)
-            self.assertFalse(ret, 'latex run reported errors')
-
-        os.unlink(pathname)
-        os.chdir(old_wd)
+def test_valid_output():
+    with open(TESTFILE) as fp:
+        tokensource = list(PythonLexer().get_tokens(fp.read()))
+    fmt = LatexFormatter(full=True, encoding='latin1')
+
+    handle, pathname = tempfile.mkstemp('.tex')
+    # place all output files in /tmp too
+    old_wd = os.getcwd()
+    os.chdir(os.path.dirname(pathname))
+    tfile = os.fdopen(handle, 'wb')
+    fmt.format(tokensource, tfile)
+    tfile.close()
+    try:
+        import subprocess
+        po = subprocess.Popen(['latex', '-interaction=nonstopmode',
+                               pathname], stdout=subprocess.PIPE)
+        ret = po.wait()
+        output = po.stdout.read()
+        po.stdout.close()
+    except OSError as e:
+        # latex not available
+        pytest.skip(str(e))
+    else:
+        if ret:
+            print(output)
+        assert not ret, 'latex run reported errors'
+
+    os.unlink(pathname)
+    os.chdir(old_wd)
diff --git a/tests/test_lexers_other.py b/tests/test_lexers_other.py
index 3716fb72..3e8d3fc1 100644
--- a/tests/test_lexers_other.py
+++ b/tests/test_lexers_other.py
@@ -3,78 +3,68 @@
     Tests for other lexers
     ~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
+
 import glob
 import os
-import unittest
+
+import pytest
 
 from pygments.lexers import guess_lexer
 from pygments.lexers.scripting import EasytrieveLexer, JclLexer, RexxLexer
 
 
-def _exampleFilePath(filename):
+def _example_file_path(filename):
     return os.path.join(os.path.dirname(__file__), 'examplefiles', filename)
 
 
-class AnalyseTextTest(unittest.TestCase):
-    def _testCanRecognizeAndGuessExampleFiles(self, lexer):
-        assert lexer is not None
-
-        for pattern in lexer.filenames:
-            exampleFilesPattern = _exampleFilePath(pattern)
-            for exampleFilePath in glob.glob(exampleFilesPattern):
-                with open(exampleFilePath, 'rb') as fp:
-                    text = fp.read().decode('utf-8')
-                probability = lexer.analyse_text(text)
-                self.assertTrue(probability > 0,
-                                '%s must recognize %r' % (
-                                    lexer.name, exampleFilePath))
-                guessedLexer = guess_lexer(text)
-                self.assertEqual(guessedLexer.name, lexer.name)
-
-    def testCanRecognizeAndGuessExampleFiles(self):
-        LEXERS_TO_TEST = [
-            EasytrieveLexer,
-            JclLexer,
-            RexxLexer,
-        ]
-        for lexerToTest in LEXERS_TO_TEST:
-            self._testCanRecognizeAndGuessExampleFiles(lexerToTest)
+@pytest.mark.parametrize('lexer', [
+    EasytrieveLexer,
+    JclLexer,
+    RexxLexer,
+])
+def test_can_recognize_and_guess_example_files(lexer):
+    for pattern in lexer.filenames:
+        exampleFilesPattern = _example_file_path(pattern)
+        for exampleFilePath in glob.glob(exampleFilesPattern):
+            with open(exampleFilePath, 'rb') as fp:
+                text = fp.read().decode('utf-8')
+            probability = lexer.analyse_text(text)
+            assert probability > 0, '%s must recognize %r' % (
+                lexer.name, exampleFilePath)
+            guessedLexer = guess_lexer(text)
+            assert guessedLexer.name == lexer.name
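+
+
+# ``pytest.mark.parametrize`` replaces the old manual loop over LEXERS_TO_TEST:
+# pytest now generates an independent test item for each lexer class, so a
+# failure for one lexer no longer hides failures for the others.  A minimal
+# sketch of the same idiom, with a hypothetical check:
+#
+#     @pytest.mark.parametrize('lexer', [EasytrieveLexer, JclLexer, RexxLexer])
+#     def test_has_filenames(lexer):
+#         assert lexer.filenames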
 
 
-class EasyTrieveLexerTest(unittest.TestCase):
-    def testCanGuessFromText(self):
-        self.assertTrue(EasytrieveLexer.analyse_text('MACRO'))
-        self.assertTrue(EasytrieveLexer.analyse_text('\nMACRO'))
-        self.assertTrue(EasytrieveLexer.analyse_text(' \nMACRO'))
-        self.assertTrue(EasytrieveLexer.analyse_text(' \n MACRO'))
-        self.assertTrue(EasytrieveLexer.analyse_text('*\nMACRO'))
-        self.assertTrue(EasytrieveLexer.analyse_text(
-            '*\n *\n\n \n*\n MACRO'))
+def test_easytrieve_can_guess_from_text():
+    assert EasytrieveLexer.analyse_text('MACRO')
+    assert EasytrieveLexer.analyse_text('\nMACRO')
+    assert EasytrieveLexer.analyse_text(' \nMACRO')
+    assert EasytrieveLexer.analyse_text(' \n MACRO')
+    assert EasytrieveLexer.analyse_text('*\nMACRO')
+    assert EasytrieveLexer.analyse_text('*\n *\n\n \n*\n MACRO')
 
 
-class RexxLexerTest(unittest.TestCase):
-    def testCanGuessFromText(self):
-        self.assertAlmostEqual(0.01, RexxLexer.analyse_text('/* */'))
-        self.assertAlmostEqual(1.0,
-                               RexxLexer.analyse_text('''/* Rexx */
-                say "hello world"'''))
-        val = RexxLexer.analyse_text('/* */\n'
-                                     'hello:pRoceduRe\n'
-                                     '  say "hello world"')
-        self.assertTrue(val > 0.5, val)
-        val = RexxLexer.analyse_text('''/* */
-                if 1 > 0 then do
-                    say "ok"
-                end
-                else do
-                    say "huh?"
-                end''')
-        self.assertTrue(val > 0.2, val)
-        val = RexxLexer.analyse_text('''/* */
-                greeting = "hello world!"
-                parse value greeting "hello" name "!"
-                say name''')
-        self.assertTrue(val > 0.2, val)
+def test_rexx_can_guess_from_text():
+    assert RexxLexer.analyse_text('/* */') == pytest.approx(0.01)
+    assert RexxLexer.analyse_text('''/* Rexx */
+            say "hello world"''') == pytest.approx(1.0)
+    val = RexxLexer.analyse_text('/* */\n'
+                                 'hello:pRoceduRe\n'
+                                 '  say "hello world"')
+    assert val > 0.5
+    val = RexxLexer.analyse_text('''/* */
+            if 1 > 0 then do
+                say "ok"
+            end
+            else do
+                say "huh?"
+            end''')
+    assert val > 0.2
+    val = RexxLexer.analyse_text('''/* */
+            greeting = "hello world!"
+            parse value greeting "hello" name "!"
+            say name''')
+    assert val > 0.2
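+
+
+# ``pytest.approx`` above stands in for the old assertAlmostEqual; with no
+# arguments it compares within a relative tolerance of 1e-6 (plus a tiny
+# absolute tolerance), which is comparable to assertAlmostEqual's default of
+# seven decimal places for scores of this magnitude.  Sketch:
+#
+#     assert 0.1 + 0.2 == pytest.approx(0.3)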
diff --git a/tests/test_markdown_lexer.py b/tests/test_markdown_lexer.py
index 16d1f28d..9024bf07 100644
--- a/tests/test_markdown_lexer.py
+++ b/tests/test_markdown_lexer.py
@@ -1,31 +1,36 @@
 # -*- coding: utf-8 -*-
 """
-    Pygments regex lexer tests
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~
+    Pygments Markdown lexer tests
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
-import unittest
+
+import pytest
 
 from pygments.lexers.markup import MarkdownLexer
 
 
-class SameTextTests(unittest.TestCase):
+@pytest.fixture(scope='module')
+def lexer():
+    yield MarkdownLexer()
+
+
+def assert_same_text(lexer, text):
+    """Show that lexed markdown does not remove any content. """
+    tokens = list(lexer.get_tokens_unprocessed(text))
+    output = ''.join(t[2] for t in tokens)
+    assert text == output
+
 
-    lexer = MarkdownLexer()
+def test_code_fence(lexer):
+    assert_same_text(lexer, r'```\nfoo\n```\n')
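+
+
+# Note: the ``r''`` prefix in these fragments keeps ``\n`` as a literal
+# backslash-n rather than a newline, so each fragment is a single line of
+# text; the round-trip check above works either way.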
 
-    def assert_same_text(self, text):
-        """Show that lexed markdown does not remove any content. """
-        tokens = list(self.lexer.get_tokens_unprocessed(text))
-        output = ''.join(t[2] for t in tokens)
-        self.assertEqual(text, output)
 
-    def test_code_fence(self):
-        self.assert_same_text(r'```\nfoo\n```\n')
+def test_code_fence_gfm(lexer):
+    assert_same_text(lexer, r'```markdown\nfoo\n```\n')
 
-    def test_code_fence_gsm(self):
-        self.assert_same_text(r'```markdown\nfoo\n```\n')
 
-    def test_code_fence_gsm_with_no_lexer(self):
-        self.assert_same_text(r'```invalid-lexer\nfoo\n```\n')
+def test_code_fence_gfm_with_no_lexer(lexer):
+    assert_same_text(lexer, r'```invalid-lexer\nfoo\n```\n')
diff --git a/tests/test_objectiveclexer.py b/tests/test_objectiveclexer.py
index 2a5fbe21..d5161d6c 100644
--- a/tests/test_objectiveclexer.py
+++ b/tests/test_objectiveclexer.py
@@ -7,74 +7,78 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.token import Token
 from pygments.lexers import ObjectiveCLexer
 
 
-class ObjectiveCLexerTest(unittest.TestCase):
+@pytest.fixture(scope='module')
+def lexer():
+    yield ObjectiveCLexer()
 
-    def setUp(self):
-        self.lexer = ObjectiveCLexer()
 
-    def testLiteralNumberInt(self):
-        fragment = u'@(1);\n'
-        expected = [
-            (Token.Literal, u'@('),
-            (Token.Literal.Number.Integer, u'1'),
-            (Token.Literal, u')'),
-            (Token.Punctuation, u';'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
+def test_literal_number_int(lexer):
+    fragment = u'@(1);\n'
+    expected = [
+        (Token.Literal, u'@('),
+        (Token.Literal.Number.Integer, u'1'),
+        (Token.Literal, u')'),
+        (Token.Punctuation, u';'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == expected
 
-    def testLiteralNumberExpression(self):
-        fragment = u'@(1+2);\n'
-        expected = [
-            (Token.Literal, u'@('),
-            (Token.Literal.Number.Integer, u'1'),
-            (Token.Operator, u'+'),
-            (Token.Literal.Number.Integer, u'2'),
-            (Token.Literal, u')'),
-            (Token.Punctuation, u';'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
 
-    def testLiteralNumberNestedExpression(self):
-        fragment = u'@(1+(2+3));\n'
-        expected = [
-            (Token.Literal, u'@('),
-            (Token.Literal.Number.Integer, u'1'),
-            (Token.Operator, u'+'),
-            (Token.Punctuation, u'('),
-            (Token.Literal.Number.Integer, u'2'),
-            (Token.Operator, u'+'),
-            (Token.Literal.Number.Integer, u'3'),
-            (Token.Punctuation, u')'),
-            (Token.Literal, u')'),
-            (Token.Punctuation, u';'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
+def test_literal_number_expression(lexer):
+    fragment = u'@(1+2);\n'
+    expected = [
+        (Token.Literal, u'@('),
+        (Token.Literal.Number.Integer, u'1'),
+        (Token.Operator, u'+'),
+        (Token.Literal.Number.Integer, u'2'),
+        (Token.Literal, u')'),
+        (Token.Punctuation, u';'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == expected
 
-    def testLiteralNumberBool(self):
-        fragment = u'@NO;\n'
-        expected = [
-            (Token.Literal.Number, u'@NO'),
-            (Token.Punctuation, u';'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
 
-    def testLieralNumberBoolExpression(self):
-        fragment = u'@(YES);\n'
-        expected = [
-            (Token.Literal, u'@('),
-            (Token.Name.Builtin, u'YES'),
-            (Token.Literal, u')'),
-            (Token.Punctuation, u';'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
+def test_literal_number_nested_expression(lexer):
+    fragment = u'@(1+(2+3));\n'
+    expected = [
+        (Token.Literal, u'@('),
+        (Token.Literal.Number.Integer, u'1'),
+        (Token.Operator, u'+'),
+        (Token.Punctuation, u'('),
+        (Token.Literal.Number.Integer, u'2'),
+        (Token.Operator, u'+'),
+        (Token.Literal.Number.Integer, u'3'),
+        (Token.Punctuation, u')'),
+        (Token.Literal, u')'),
+        (Token.Punctuation, u';'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == expected
+
+
+def test_literal_number_bool(lexer):
+    fragment = u'@NO;\n'
+    expected = [
+        (Token.Literal.Number, u'@NO'),
+        (Token.Punctuation, u';'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == expected
+
+
+def test_literal_number_bool_expression(lexer):
+    fragment = u'@(YES);\n'
+    expected = [
+        (Token.Literal, u'@('),
+        (Token.Name.Builtin, u'YES'),
+        (Token.Literal, u')'),
+        (Token.Punctuation, u';'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == expected
diff --git a/tests/test_perllexer.py b/tests/test_perllexer.py
index 9845d9d6..88ddacff 100644
--- a/tests/test_perllexer.py
+++ b/tests/test_perllexer.py
@@ -8,145 +8,175 @@
 """
 
 import time
-import unittest
+
+import pytest
 
 from pygments.token import Keyword, Name, String, Text
 from pygments.lexers.perl import PerlLexer
 
 
-class RunawayRegexTest(unittest.TestCase):
-    # A previous version of the Perl lexer would spend a great deal of
-    # time backtracking when given particular strings.  These tests show that
-    # the runaway backtracking doesn't happen any more (at least for the given
-    # cases).
+@pytest.fixture(scope='module')
+def lexer():
+    yield PerlLexer()
+
+
+# Test runaway regexes.
+# A previous version of the Perl lexer would spend a great deal of
+# time backtracking when given particular strings.  These tests show that
+# the runaway backtracking doesn't happen any more (at least for the given
+# cases).
+
+
+# Test helpers.
+
+def assert_single_token(lexer, s, token):
+    """Show that a given string generates only one token."""
+    tokens = list(lexer.get_tokens_unprocessed(s))
+    assert len(tokens) == 1
+    assert s == tokens[0][2]
+    assert token == tokens[0][1]
+
+
+def assert_tokens(lexer, strings, expected_tokens):
+    """Show that a given string generates the expected tokens."""
+    tokens = list(lexer.get_tokens_unprocessed(''.join(strings)))
+    assert len(tokens) == len(expected_tokens)
+    for index, s in enumerate(strings):
+        assert s == tokens[index][2]
+        assert expected_tokens[index] == tokens[index][1]
+
+
+def assert_fast_tokenization(lexer, s):
+    """Show that a given string is tokenized quickly."""
+    start = time.time()
+    tokens = list(lexer.get_tokens_unprocessed(s))
+    end = time.time()
+    # Isn't 10 seconds kind of a long time?  Yes, but we don't want false
+    # positives when the tests are starved for CPU time.
+    if end-start > 10:
+        pytest.fail('tokenization took too long')
+    return tokens
+
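+# The stress inputs fed to assert_fast_tokenization below are a lone delimiter
+# followed by roughly a thousand backslashes (e.g. '"' + '\\'*999), the shape
+# of input that used to trigger the runaway backtracking described above.
+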
+
+# Strings.
+
+def test_single_quote_strings(lexer):
+    assert_single_token(lexer, r"'foo\tbar\\\'baz'", String)
+    assert_fast_tokenization(lexer, "'" + '\\'*999)
+
+
+def test_double_quote_strings(lexer):
+    assert_single_token(lexer, r'"foo\tbar\\\"baz"', String)
+    assert_fast_tokenization(lexer, '"' + '\\'*999)
+
+
+def test_backtick_strings(lexer):
+    assert_single_token(lexer, r'`foo\tbar\\\`baz`', String.Backtick)
+    assert_fast_tokenization(lexer, '`' + '\\'*999)
+
+
+# Regex matches with various delimiters.
+
+def test_match(lexer):
+    assert_single_token(lexer, r'/aa\tbb/', String.Regex)
+    assert_fast_tokenization(lexer, '/' + '\\'*999)
+
+
+def test_match_with_slash(lexer):
+    assert_tokens(lexer, ['m', '/\n\\t\\\\/'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm/xxx\n' + '\\'*999)
+
+
+def test_match_with_bang(lexer):
+    assert_tokens(lexer, ['m', r'!aa\t\!bb!'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm!' + '\\'*999)
+
+
+def test_match_with_brace(lexer):
+    assert_tokens(lexer, ['m', r'{aa\t\}bb}'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm{' + '\\'*999)
+
+
+def test_match_with_angle_brackets(lexer):
+    assert_tokens(lexer, ['m', r'<aa\t\>bb>'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm<' + '\\'*999)
+
+
+def test_match_with_parenthesis(lexer):
+    assert_tokens(lexer, ['m', r'(aa\t\)bb)'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm(' + '\\'*999)
+
+
+def test_match_with_at_sign(lexer):
+    assert_tokens(lexer, ['m', r'@aa\t\@bb@'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm@' + '\\'*999)
+
 
-    lexer = PerlLexer()
+def test_match_with_percent_sign(lexer):
+    assert_tokens(lexer, ['m', r'%aa\t\%bb%'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm%' + '\\'*999)
 
-    ### Test helpers.
 
-    def assert_single_token(self, s, token):
-        """Show that a given string generates only one token."""
-        tokens = list(self.lexer.get_tokens_unprocessed(s))
-        self.assertEqual(len(tokens), 1, tokens)
-        self.assertEqual(s, tokens[0][2])
-        self.assertEqual(token, tokens[0][1])
+def test_match_with_dollar_sign(lexer):
+    assert_tokens(lexer, ['m', r'$aa\t\$bb$'], [String.Regex, String.Regex])
+    assert_fast_tokenization(lexer, 'm$' + '\\'*999)
 
-    def assert_tokens(self, strings, expected_tokens):
-        """Show that a given string generates the expected tokens."""
-        tokens = list(self.lexer.get_tokens_unprocessed(''.join(strings)))
-        self.assertEqual(len(tokens), len(expected_tokens), tokens)
-        for index, s in enumerate(strings):
-            self.assertEqual(s, tokens[index][2])
-            self.assertEqual(expected_tokens[index], tokens[index][1])
 
-    def assert_fast_tokenization(self, s):
-        """Show that a given string is tokenized quickly."""
-        start = time.time()
-        tokens = list(self.lexer.get_tokens_unprocessed(s))
-        end = time.time()
-        # Isn't 10 seconds kind of a long time?  Yes, but we don't want false
-        # positives when the tests are starved for CPU time.
-        if end-start > 10:
-            self.fail('tokenization took too long')
-        return tokens
+# Regex substitutions with various delimiters.
 
-    ### Strings.
+def test_substitution_with_slash(lexer):
+    assert_single_token(lexer, 's/aaa/bbb/g', String.Regex)
+    assert_fast_tokenization(lexer, 's/foo/' + '\\'*999)
 
-    def test_single_quote_strings(self):
-        self.assert_single_token(r"'foo\tbar\\\'baz'", String)
-        self.assert_fast_tokenization("'" + '\\'*999)
 
-    def test_double_quote_strings(self):
-        self.assert_single_token(r'"foo\tbar\\\"baz"', String)
-        self.assert_fast_tokenization('"' + '\\'*999)
+def test_substitution_with_at_sign(lexer):
+    assert_single_token(lexer, r's@aaa@bbb@g', String.Regex)
+    assert_fast_tokenization(lexer, 's@foo@' + '\\'*999)
 
-    def test_backtick_strings(self):
-        self.assert_single_token(r'`foo\tbar\\\`baz`', String.Backtick)
-        self.assert_fast_tokenization('`' + '\\'*999)
 
-    ### Regex matches with various delimiters.
+def test_substitution_with_percent_sign(lexer):
+    assert_single_token(lexer, r's%aaa%bbb%g', String.Regex)
+    assert_fast_tokenization(lexer, 's%foo%' + '\\'*999)
 
-    def test_match(self):
-        self.assert_single_token(r'/aa\tbb/', String.Regex)
-        self.assert_fast_tokenization('/' + '\\'*999)
 
-    def test_match_with_slash(self):
-        self.assert_tokens(['m', '/\n\\t\\\\/'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m/xxx\n' + '\\'*999)
+def test_substitution_with_brace(lexer):
+    assert_single_token(lexer, r's{aaa}', String.Regex)
+    assert_fast_tokenization(lexer, 's{' + '\\'*999)
 
-    def test_match_with_bang(self):
-        self.assert_tokens(['m', r'!aa\t\!bb!'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m!' + '\\'*999)
 
-    def test_match_with_brace(self):
-        self.assert_tokens(['m', r'{aa\t\}bb}'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m{' + '\\'*999)
+def test_substitution_with_angle_bracket(lexer):
+    assert_single_token(lexer, r's<aaa>', String.Regex)
+    assert_fast_tokenization(lexer, 's<' + '\\'*999)
 
-    def test_match_with_angle_brackets(self):
-        self.assert_tokens(['m', r'<aa\t\>bb>'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m<' + '\\'*999)
 
-    def test_match_with_parenthesis(self):
-        self.assert_tokens(['m', r'(aa\t\)bb)'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m(' + '\\'*999)
+def test_substitution_with_square_bracket(lexer):
+    assert_single_token(lexer, r's[aaa]', String.Regex)
+    assert_fast_tokenization(lexer, 's[' + '\\'*999)
 
-    def test_match_with_at_sign(self):
-        self.assert_tokens(['m', r'@aa\t\@bb@'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m@' + '\\'*999)
 
-    def test_match_with_percent_sign(self):
-        self.assert_tokens(['m', r'%aa\t\%bb%'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m%' + '\\'*999)
+def test_substitution_with_parenthesis(lexer):
+    assert_single_token(lexer, r's(aaa)', String.Regex)
+    assert_fast_tokenization(lexer, 's(' + '\\'*999)
 
-    def test_match_with_dollar_sign(self):
-        self.assert_tokens(['m', r'$aa\t\$bb$'], [String.Regex, String.Regex])
-        self.assert_fast_tokenization('m$' + '\\'*999)
 
-    ### Regex substitutions with various delimeters.
+# Namespaces/modules
 
-    def test_substitution_with_slash(self):
-        self.assert_single_token('s/aaa/bbb/g', String.Regex)
-        self.assert_fast_tokenization('s/foo/' + '\\'*999)
+def test_package_statement(lexer):
+    assert_tokens(lexer, ['package', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
+    assert_tokens(lexer, ['package', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
 
-    def test_substitution_with_at_sign(self):
-        self.assert_single_token(r's@aaa@bbb@g', String.Regex)
-        self.assert_fast_tokenization('s@foo@' + '\\'*999)
 
-    def test_substitution_with_percent_sign(self):
-        self.assert_single_token(r's%aaa%bbb%g', String.Regex)
-        self.assert_fast_tokenization('s%foo%' + '\\'*999)
-
-    def test_substitution_with_brace(self):
-        self.assert_single_token(r's{aaa}', String.Regex)
-        self.assert_fast_tokenization('s{' + '\\'*999)
+def test_use_statement(lexer):
+    assert_tokens(lexer, ['use', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
+    assert_tokens(lexer, ['use', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
 
-    def test_substitution_with_angle_bracket(self):
-        self.assert_single_token(r's<aaa>', String.Regex)
-        self.assert_fast_tokenization('s<' + '\\'*999)
-
-    def test_substitution_with_square_bracket(self):
-        self.assert_single_token(r's[aaa]', String.Regex)
-        self.assert_fast_tokenization('s[' + '\\'*999)
-
-    def test_substitution_with_parenthesis(self):
-        self.assert_single_token(r's(aaa)', String.Regex)
-        self.assert_fast_tokenization('s(' + '\\'*999)
-
-    ### Namespaces/modules
-
-    def test_package_statement(self):
-        self.assert_tokens(['package', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
-        self.assert_tokens(['package', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
 
-    def test_use_statement(self):
-        self.assert_tokens(['use', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
-        self.assert_tokens(['use', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
+def test_no_statement(lexer):
+    assert_tokens(lexer, ['no', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
+    assert_tokens(lexer, ['no', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
 
-    def test_no_statement(self):
-        self.assert_tokens(['no', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
-        self.assert_tokens(['no', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
 
-    def test_require_statement(self):
-        self.assert_tokens(['require', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
-        self.assert_tokens(['require', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
-        self.assert_tokens(['require', ' ', '"Foo/Bar.pm"'], [Keyword, Text, String])
+def test_require_statement(lexer):
+    assert_tokens(lexer, ['require', ' ', 'Foo'], [Keyword, Text, Name.Namespace])
+    assert_tokens(lexer, ['require', '  ', 'Foo::Bar'], [Keyword, Text, Name.Namespace])
+    assert_tokens(lexer, ['require', ' ', '"Foo/Bar.pm"'], [Keyword, Text, String])
diff --git a/tests/test_php.py b/tests/test_php.py
index b4117381..1660183a 100644
--- a/tests/test_php.py
+++ b/tests/test_php.py
@@ -3,34 +3,35 @@
     PHP Tests
     ~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.lexers import PhpLexer
 from pygments.token import Token
 
 
-class PhpTest(unittest.TestCase):
-    def setUp(self):
-        self.lexer = PhpLexer()
+@pytest.fixture(scope='module')
+def lexer():
+    yield PhpLexer()
 
-    def testStringEscapingRun(self):
-        fragment = '\n'
-        tokens = [
-            (Token.Comment.Preproc, ''),
-            (Token.Other, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+def test_string_escaping_run(lexer):
+    fragment = '\n'
+    tokens = [
+        (Token.Comment.Preproc, ''),
+        (Token.Other, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_praat.py b/tests/test_praat.py
index 1ca97d1e..dd27ca7c 100644
--- a/tests/test_praat.py
+++ b/tests/test_praat.py
@@ -7,124 +7,129 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.token import Token
 from pygments.lexers import PraatLexer
 
-class PraatTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = PraatLexer()
-        self.maxDiff = None
-
-    def testNumericAssignment(self):
-        fragment = u'var = -15e4\n'
-        tokens = [
-            (Token.Text, u'var'),
-            (Token.Text, u' '),
-            (Token.Operator, u'='),
-            (Token.Text, u' '),
-            (Token.Operator, u'-'),
-            (Token.Literal.Number, u'15e4'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testStringAssignment(self):
-        fragment = u'var$ = "foo"\n'
-        tokens = [
-            (Token.Text, u'var$'),
-            (Token.Text, u' '),
-            (Token.Operator, u'='),
-            (Token.Text, u' '),
-            (Token.Literal.String, u'"'),
-            (Token.Literal.String, u'foo'),
-            (Token.Literal.String, u'"'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testStringEscapedQuotes(self):
-        fragment = u'"it said ""foo"""\n'
-        tokens = [
-            (Token.Literal.String, u'"'),
-            (Token.Literal.String, u'it said '),
-            (Token.Literal.String, u'"'),
-            (Token.Literal.String, u'"'),
-            (Token.Literal.String, u'foo'),
-            (Token.Literal.String, u'"'),
-            (Token.Literal.String, u'"'),
-            (Token.Literal.String, u'"'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testFunctionCall(self):
-        fragment = u'selected("Sound", i+(a*b))\n'
-        tokens = [
-            (Token.Name.Function, u'selected'),
-            (Token.Punctuation, u'('),
-            (Token.Literal.String, u'"'),
-            (Token.Literal.String, u'Sound'),
-            (Token.Literal.String, u'"'),
-            (Token.Punctuation, u','),
-            (Token.Text, u' '),
-            (Token.Text, u'i'),
-            (Token.Operator, u'+'),
-            (Token.Text, u'('),
-            (Token.Text, u'a'),
-            (Token.Operator, u'*'),
-            (Token.Text, u'b'),
-            (Token.Text, u')'),
-            (Token.Punctuation, u')'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testBrokenUnquotedString(self):
-        fragment = u'printline string\n... \'interpolated\' string\n'
-        tokens = [
-            (Token.Keyword, u'printline'),
-            (Token.Text, u' '),
-            (Token.Literal.String, u'string'),
-            (Token.Text, u'\n'),
-            (Token.Punctuation, u'...'),
-            (Token.Text, u' '),
-            (Token.Literal.String.Interpol, u"'"),
-            (Token.Literal.String.Interpol, u'interpolated'),
-            (Token.Literal.String.Interpol, u"'"),
-            (Token.Text, u' '),
-            (Token.Literal.String, u'string'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testInlinIf(self):
-        fragment = u'var = if true == 1 then -1 else 0 fi'
-        tokens = [
-            (Token.Text, u'var'),
-            (Token.Text, u' '),
-            (Token.Operator, u'='),
-            (Token.Text, u' '),
-            (Token.Keyword, u'if'),
-            (Token.Text, u' '),
-            (Token.Text, u'true'),
-            (Token.Text, u' '),
-            (Token.Operator, u'=='),
-            (Token.Text, u' '),
-            (Token.Literal.Number, u'1'),
-            (Token.Text, u' '),
-            (Token.Keyword, u'then'),
-            (Token.Text, u' '),
-            (Token.Operator, u'-'),
-            (Token.Literal.Number, u'1'),
-            (Token.Text, u' '),
-            (Token.Keyword, u'else'),
-            (Token.Text, u' '),
-            (Token.Literal.Number, u'0'),
-            (Token.Text, u' '),
-            (Token.Keyword, u'fi'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+@pytest.fixture(scope='module')
+def lexer():
+    yield PraatLexer()
+
+
+def test_numeric_assignment(lexer):
+    fragment = u'var = -15e4\n'
+    tokens = [
+        (Token.Text, u'var'),
+        (Token.Text, u' '),
+        (Token.Operator, u'='),
+        (Token.Text, u' '),
+        (Token.Operator, u'-'),
+        (Token.Literal.Number, u'15e4'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_string_assignment(lexer):
+    fragment = u'var$ = "foo"\n'
+    tokens = [
+        (Token.Text, u'var$'),
+        (Token.Text, u' '),
+        (Token.Operator, u'='),
+        (Token.Text, u' '),
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u'foo'),
+        (Token.Literal.String, u'"'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_string_escaped_quotes(lexer):
+    fragment = u'"it said ""foo"""\n'
+    tokens = [
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u'it said '),
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u'foo'),
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u'"'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_function_call(lexer):
+    fragment = u'selected("Sound", i+(a*b))\n'
+    tokens = [
+        (Token.Name.Function, u'selected'),
+        (Token.Punctuation, u'('),
+        (Token.Literal.String, u'"'),
+        (Token.Literal.String, u'Sound'),
+        (Token.Literal.String, u'"'),
+        (Token.Punctuation, u','),
+        (Token.Text, u' '),
+        (Token.Text, u'i'),
+        (Token.Operator, u'+'),
+        (Token.Text, u'('),
+        (Token.Text, u'a'),
+        (Token.Operator, u'*'),
+        (Token.Text, u'b'),
+        (Token.Text, u')'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_broken_unquoted_string(lexer):
+    fragment = u'printline string\n... \'interpolated\' string\n'
+    tokens = [
+        (Token.Keyword, u'printline'),
+        (Token.Text, u' '),
+        (Token.Literal.String, u'string'),
+        (Token.Text, u'\n'),
+        (Token.Punctuation, u'...'),
+        (Token.Text, u' '),
+        (Token.Literal.String.Interpol, u"'"),
+        (Token.Literal.String.Interpol, u'interpolated'),
+        (Token.Literal.String.Interpol, u"'"),
+        (Token.Text, u' '),
+        (Token.Literal.String, u'string'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_inline_if(lexer):
+    fragment = u'var = if true == 1 then -1 else 0 fi'
+    tokens = [
+        (Token.Text, u'var'),
+        (Token.Text, u' '),
+        (Token.Operator, u'='),
+        (Token.Text, u' '),
+        (Token.Keyword, u'if'),
+        (Token.Text, u' '),
+        (Token.Text, u'true'),
+        (Token.Text, u' '),
+        (Token.Operator, u'=='),
+        (Token.Text, u' '),
+        (Token.Literal.Number, u'1'),
+        (Token.Text, u' '),
+        (Token.Keyword, u'then'),
+        (Token.Text, u' '),
+        (Token.Operator, u'-'),
+        (Token.Literal.Number, u'1'),
+        (Token.Text, u' '),
+        (Token.Keyword, u'else'),
+        (Token.Text, u' '),
+        (Token.Literal.Number, u'0'),
+        (Token.Text, u' '),
+        (Token.Keyword, u'fi'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_properties.py b/tests/test_properties.py
index 562778ba..25368d93 100644
--- a/tests/test_properties.py
+++ b/tests/test_properties.py
@@ -3,87 +3,94 @@
     Properties Tests
     ~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.lexers.configs import PropertiesLexer
 from pygments.token import Token
 
 
-class PropertiesTest(unittest.TestCase):
-    def setUp(self):
-        self.lexer = PropertiesLexer()
-
-    def test_comments(self):
-        """
-        Assures lines lead by either # or ! are recognized as a comment
-        """
-        fragment = '! a comment\n# also a comment\n'
-        tokens = [
-            (Token.Comment, '! a comment'),
-            (Token.Text, '\n'),
-            (Token.Comment, '# also a comment'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def test_leading_whitespace_comments(self):
-        fragment = '    # comment\n'
-        tokens = [
-            (Token.Text, '    '),
-            (Token.Comment, '# comment'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def test_escaped_space_in_key(self):
-        fragment = 'key = value\n'
-        tokens = [
-            (Token.Name.Attribute, 'key'),
-            (Token.Text, ' '),
-            (Token.Operator, '='),
-            (Token.Text, ' '),
-            (Token.Literal.String, 'value'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def test_escaped_space_in_value(self):
-        fragment = 'key = doubleword\\ value\n'
-        tokens = [
-            (Token.Name.Attribute, 'key'),
-            (Token.Text, ' '),
-            (Token.Operator, '='),
-            (Token.Text, ' '),
-            (Token.Literal.String, 'doubleword\\ value'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def test_space_delimited_kv_pair(self):
-        fragment = 'key value\n'
-        tokens = [
-            (Token.Name.Attribute, 'key'),
-            (Token.Text, ' '),
-            (Token.Literal.String, 'value\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def test_just_key(self):
-        fragment = 'justkey\n'
-        tokens = [
-            (Token.Name.Attribute, 'justkey'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def test_just_key_with_space(self):
-        fragment = 'just\\ key\n'
-        tokens = [
-            (Token.Name.Attribute, 'just\\ key'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+@pytest.fixture(scope='module')
+def lexer():
+    yield PropertiesLexer()
+
+
+def test_comments(lexer):
+    """
+    Assert that lines beginning with either # or ! are recognized as comments.
+    """
+    fragment = '! a comment\n# also a comment\n'
+    tokens = [
+        (Token.Comment, '! a comment'),
+        (Token.Text, '\n'),
+        (Token.Comment, '# also a comment'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_leading_whitespace_comments(lexer):
+    fragment = '    # comment\n'
+    tokens = [
+        (Token.Text, '    '),
+        (Token.Comment, '# comment'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_escaped_space_in_key(lexer):
+    fragment = 'key = value\n'
+    tokens = [
+        (Token.Name.Attribute, 'key'),
+        (Token.Text, ' '),
+        (Token.Operator, '='),
+        (Token.Text, ' '),
+        (Token.Literal.String, 'value'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_escaped_space_in_value(lexer):
+    fragment = 'key = doubleword\\ value\n'
+    tokens = [
+        (Token.Name.Attribute, 'key'),
+        (Token.Text, ' '),
+        (Token.Operator, '='),
+        (Token.Text, ' '),
+        (Token.Literal.String, 'doubleword\\ value'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_space_delimited_kv_pair(lexer):
+    fragment = 'key value\n'
+    tokens = [
+        (Token.Name.Attribute, 'key'),
+        (Token.Text, ' '),
+        (Token.Literal.String, 'value\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_just_key(lexer):
+    fragment = 'justkey\n'
+    tokens = [
+        (Token.Name.Attribute, 'justkey'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_just_key_with_space(lexer):
+    fragment = 'just\\ key\n'
+    tokens = [
+        (Token.Name.Attribute, 'just\\ key'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_python.py b/tests/test_python.py
index 6445022c..4e5d5bbf 100644
--- a/tests/test_python.py
+++ b/tests/test_python.py
@@ -3,131 +3,133 @@
     Python Tests
     ~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.lexers import PythonLexer, Python3Lexer
 from pygments.token import Token
 
 
-class PythonTest(unittest.TestCase):
-    def setUp(self):
-        self.lexer = PythonLexer()
+@pytest.fixture(scope='module')
+def lexer2():
+    yield PythonLexer()
 
-    def test_cls_builtin(self):
-        """
-        Tests that a cls token gets interpreted as a Token.Name.Builtin.Pseudo
 
-        """
-        fragment = 'class TestClass():\n    @classmethod\n    def hello(cls):\n        pass\n'
-        tokens = [
-            (Token.Keyword, 'class'),
-            (Token.Text, ' '),
-            (Token.Name.Class, 'TestClass'),
-            (Token.Punctuation, '('),
-            (Token.Punctuation, ')'),
-            (Token.Punctuation, ':'),
-            (Token.Text, '\n'),
-            (Token.Text, '    '),
-            (Token.Name.Decorator, '@classmethod'),
-            (Token.Text, '\n'),
-            (Token.Text, '    '),
-            (Token.Keyword, 'def'),
-            (Token.Text, ' '),
-            (Token.Name.Function, 'hello'),
-            (Token.Punctuation, '('),
-            (Token.Name.Builtin.Pseudo, 'cls'),
-            (Token.Punctuation, ')'),
-            (Token.Punctuation, ':'),
-            (Token.Text, '\n'),
-            (Token.Text, '        '),
-            (Token.Keyword, 'pass'),
-            (Token.Text, '\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+@pytest.fixture(scope='module')
+def lexer3():
+    yield Python3Lexer()
+
+
+def test_cls_builtin(lexer2):
+    """
+    Tests that a cls token gets interpreted as a Token.Name.Builtin.Pseudo
+    """
+    fragment = 'class TestClass():\n    @classmethod\n    def hello(cls):\n        pass\n'
+    tokens = [
+        (Token.Keyword, 'class'),
+        (Token.Text, ' '),
+        (Token.Name.Class, 'TestClass'),
+        (Token.Punctuation, '('),
+        (Token.Punctuation, ')'),
+        (Token.Punctuation, ':'),
+        (Token.Text, '\n'),
+        (Token.Text, '    '),
+        (Token.Name.Decorator, '@classmethod'),
+        (Token.Text, '\n'),
+        (Token.Text, '    '),
+        (Token.Keyword, 'def'),
+        (Token.Text, ' '),
+        (Token.Name.Function, 'hello'),
+        (Token.Punctuation, '('),
+        (Token.Name.Builtin.Pseudo, 'cls'),
+        (Token.Punctuation, ')'),
+        (Token.Punctuation, ':'),
+        (Token.Text, '\n'),
+        (Token.Text, '        '),
+        (Token.Keyword, 'pass'),
+        (Token.Text, '\n'),
+    ]
+    assert list(lexer2.get_tokens(fragment)) == tokens
+
 
+def test_needs_name(lexer3):
+    """
+    Tests that '@' is recognized as an Operator
+    """
+    fragment = u'S = (H @ beta - r).T @ inv(H @ V @ H.T) @ (H @ beta - r)\n'
+    tokens = [
+        (Token.Name, u'S'),
+        (Token.Text, u' '),
+        (Token.Operator, u'='),
+        (Token.Text, u' '),
+        (Token.Punctuation, u'('),
+        (Token.Name, u'H'),
+        (Token.Text, u' '),
+        (Token.Operator, u'@'),
+        (Token.Text, u' '),
+        (Token.Name, u'beta'),
+        (Token.Text, u' '),
+        (Token.Operator, u'-'),
+        (Token.Text, u' '),
+        (Token.Name, u'r'),
+        (Token.Punctuation, u')'),
+        (Token.Operator, u'.'),
+        (Token.Name, u'T'),
+        (Token.Text, u' '),
+        (Token.Operator, u'@'),
+        (Token.Text, u' '),
+        (Token.Name, u'inv'),
+        (Token.Punctuation, u'('),
+        (Token.Name, u'H'),
+        (Token.Text, u' '),
+        (Token.Operator, u'@'),
+        (Token.Text, u' '),
+        (Token.Name, u'V'),
+        (Token.Text, u' '),
+        (Token.Operator, u'@'),
+        (Token.Text, u' '),
+        (Token.Name, u'H'),
+        (Token.Operator, u'.'),
+        (Token.Name, u'T'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u' '),
+        (Token.Operator, u'@'),
+        (Token.Text, u' '),
+        (Token.Punctuation, u'('),
+        (Token.Name, u'H'),
+        (Token.Text, u' '),
+        (Token.Operator, u'@'),
+        (Token.Text, u' '),
+        (Token.Name, u'beta'),
+        (Token.Text, u' '),
+        (Token.Operator, u'-'),
+        (Token.Text, u' '),
+        (Token.Name, u'r'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer3.get_tokens(fragment)) == tokens
 
-class Python3Test(unittest.TestCase):
-    def setUp(self):
-        self.lexer = Python3Lexer()
-        
-    def testNeedsName(self):
-        """
-        Tests that '@' is recognized as an Operator
-        """
-        fragment = u'S = (H @ beta - r).T @ inv(H @ V @ H.T) @ (H @ beta - r)\n'
+
+def test_pep_515(lexer3):
+    """
+    Tests that the lexer can parse numeric literals with underscores
+    """
+    fragments = (
+        (Token.Literal.Number.Integer, u'1_000_000'),
+        (Token.Literal.Number.Float, u'1_000.000_001'),
+        (Token.Literal.Number.Float, u'1_000e1_000j'),
+        (Token.Literal.Number.Hex, u'0xCAFE_F00D'),
+        (Token.Literal.Number.Bin, u'0b_0011_1111_0100_1110'),
+        (Token.Literal.Number.Oct, u'0o_777_123'),
+    )
+
+    for token, fragment in fragments:
         tokens = [
-            (Token.Name, u'S'),
-            (Token.Text, u' '),
-            (Token.Operator, u'='),
-            (Token.Text, u' '),
-            (Token.Punctuation, u'('),
-            (Token.Name, u'H'),
-            (Token.Text, u' '),
-            (Token.Operator, u'@'),
-            (Token.Text, u' '),
-            (Token.Name, u'beta'),
-            (Token.Text, u' '),
-            (Token.Operator, u'-'),
-            (Token.Text, u' '),
-            (Token.Name, u'r'),
-            (Token.Punctuation, u')'),
-            (Token.Operator, u'.'),
-            (Token.Name, u'T'),
-            (Token.Text, u' '),
-            (Token.Operator, u'@'),
-            (Token.Text, u' '),
-            (Token.Name, u'inv'),
-            (Token.Punctuation, u'('),
-            (Token.Name, u'H'),
-            (Token.Text, u' '),
-            (Token.Operator, u'@'),
-            (Token.Text, u' '),
-            (Token.Name, u'V'),
-            (Token.Text, u' '),
-            (Token.Operator, u'@'),
-            (Token.Text, u' '),
-            (Token.Name, u'H'),
-            (Token.Operator, u'.'),
-            (Token.Name, u'T'),
-            (Token.Punctuation, u')'),
-            (Token.Text, u' '),
-            (Token.Operator, u'@'),
-            (Token.Text, u' '),
-            (Token.Punctuation, u'('),
-            (Token.Name, u'H'),
-            (Token.Text, u' '),
-            (Token.Operator, u'@'),
-            (Token.Text, u' '),
-            (Token.Name, u'beta'),
-            (Token.Text, u' '),
-            (Token.Operator, u'-'),
-            (Token.Text, u' '),
-            (Token.Name, u'r'),
-            (Token.Punctuation, u')'),
+            (token, fragment),
             (Token.Text, u'\n'),
         ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def test_pep_515(self):
-        """
-        Tests that the lexer can parse numeric literals with underscores
-        """
-        fragments = (
-            (Token.Literal.Number.Integer, u'1_000_000'),
-            (Token.Literal.Number.Float, u'1_000.000_001'),
-            (Token.Literal.Number.Float, u'1_000e1_000j'),
-            (Token.Literal.Number.Hex, u'0xCAFE_F00D'),
-            (Token.Literal.Number.Bin, u'0b_0011_1111_0100_1110'),
-            (Token.Literal.Number.Oct, u'0o_777_123'),
-        )
-
-        for token, fragment in fragments:
-            tokens = [
-                (token, fragment),
-                (Token.Text, u'\n'),
-            ]
-            self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+        assert list(lexer3.get_tokens(fragment)) == tokens
diff --git a/tests/test_qbasiclexer.py b/tests/test_qbasiclexer.py
index e6212d65..3c64d69e 100644
--- a/tests/test_qbasiclexer.py
+++ b/tests/test_qbasiclexer.py
@@ -3,39 +3,39 @@
     Tests for QBasic
     ~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.token import Token
 from pygments.lexers.basic import QBasicLexer
 
 
-class QBasicTest(unittest.TestCase):
-    def setUp(self):
-        self.lexer = QBasicLexer()
-        self.maxDiff = None
+@pytest.fixture(scope='module')
+def lexer():
+    yield QBasicLexer()
 
-    def testKeywordsWithDollar(self):
-        fragment = u'DIM x\nx = RIGHT$("abc", 1)\n'
-        expected = [
-            (Token.Keyword.Declaration, u'DIM'),
-            (Token.Text.Whitespace, u' '),
-            (Token.Name.Variable.Global, u'x'),
-            (Token.Text, u'\n'),
-            (Token.Name.Variable.Global, u'x'),
-            (Token.Text.Whitespace, u' '),
-            (Token.Operator, u'='),
-            (Token.Text.Whitespace, u' '),
-            (Token.Keyword.Reserved, u'RIGHT$'),
-            (Token.Punctuation, u'('),
-            (Token.Literal.String.Double, u'"abc"'),
-            (Token.Punctuation, u','),
-            (Token.Text.Whitespace, u' '),
-            (Token.Literal.Number.Integer.Long, u'1'),
-            (Token.Punctuation, u')'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(expected, list(self.lexer.get_tokens(fragment)))
+
+def test_keywords_with_dollar(lexer):
+    fragment = u'DIM x\nx = RIGHT$("abc", 1)\n'
+    expected = [
+        (Token.Keyword.Declaration, u'DIM'),
+        (Token.Text.Whitespace, u' '),
+        (Token.Name.Variable.Global, u'x'),
+        (Token.Text, u'\n'),
+        (Token.Name.Variable.Global, u'x'),
+        (Token.Text.Whitespace, u' '),
+        (Token.Operator, u'='),
+        (Token.Text.Whitespace, u' '),
+        (Token.Keyword.Reserved, u'RIGHT$'),
+        (Token.Punctuation, u'('),
+        (Token.Literal.String.Double, u'"abc"'),
+        (Token.Punctuation, u','),
+        (Token.Text.Whitespace, u' '),
+        (Token.Literal.Number.Integer.Long, u'1'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == expected
diff --git a/tests/test_r.py b/tests/test_r.py
index 70148e53..72cb8afc 100644
--- a/tests/test_r.py
+++ b/tests/test_r.py
@@ -1,70 +1,75 @@
 # -*- coding: utf-8 -*-
 """
     R Tests
-    ~~~~~~~~~
+    ~~~~~~~
 
-    :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.lexers import SLexer
 from pygments.token import Token, Name, Punctuation
 
 
-class RTest(unittest.TestCase):
-    def setUp(self):
-        self.lexer = SLexer()
+@pytest.fixture(scope='module')
+def lexer():
+    yield SLexer()
 
-    def testCall(self):
-        fragment = u'f(1, a)\n'
-        tokens = [
-            (Name.Function, u'f'),
-            (Punctuation, u'('),
-            (Token.Literal.Number, u'1'),
-            (Punctuation, u','),
-            (Token.Text, u' '),
-            (Token.Name, u'a'),
-            (Punctuation, u')'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
 
-    def testName1(self):
-        fragment = u'._a_2.c'
-        tokens = [
-            (Name, u'._a_2.c'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+def test_call(lexer):
+    fragment = u'f(1, a)\n'
+    tokens = [
+        (Name.Function, u'f'),
+        (Punctuation, u'('),
+        (Token.Literal.Number, u'1'),
+        (Punctuation, u','),
+        (Token.Text, u' '),
+        (Token.Name, u'a'),
+        (Punctuation, u')'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
 
-    def testName2(self):
-        # Invalid names are valid if backticks are used
-        fragment = u'`.1 blah`'
-        tokens = [
-            (Name, u'`.1 blah`'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
 
-    def testName3(self):
-        # Internal backticks can be escaped
-        fragment = u'`.1 \\` blah`'
-        tokens = [
-            (Name, u'`.1 \\` blah`'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+def test_name1(lexer):
+    fragment = u'._a_2.c'
+    tokens = [
+        (Name, u'._a_2.c'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
 
-    def testCustomOperator(self):
-        fragment = u'7 % and % 8'
-        tokens = [
-            (Token.Literal.Number, u'7'),
-            (Token.Text, u' '),
-            (Token.Operator, u'% and %'),
-            (Token.Text, u' '),
-            (Token.Literal.Number, u'8'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+def test_name2(lexer):
+    # Invalid names are valid if backticks are used
+    fragment = u'`.1 blah`'
+    tokens = [
+        (Name, u'`.1 blah`'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_name3(lexer):
+    # Internal backticks can be escaped
+    fragment = u'`.1 \\` blah`'
+    tokens = [
+        (Name, u'`.1 \\` blah`'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_custom_operator(lexer):
+    fragment = u'7 % and % 8'
+    tokens = [
+        (Token.Literal.Number, u'7'),
+        (Token.Text, u' '),
+        (Token.Operator, u'% and %'),
+        (Token.Text, u' '),
+        (Token.Literal.Number, u'8'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_regexlexer.py b/tests/test_regexlexer.py
index 683d6def..4e832361 100644
--- a/tests/test_regexlexer.py
+++ b/tests/test_regexlexer.py
@@ -3,15 +3,19 @@
     Pygments regex lexer tests
     ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.token import Text
-from pygments.lexer import RegexLexer
-from pygments.lexer import default
+from pygments.lexer import RegexLexer, default
+
+
+@pytest.fixture(scope='module')
+def lexer():
+    yield MyLexer()
 
 
 class MyLexer(RegexLexer):
@@ -34,33 +38,29 @@ class MyLexer(RegexLexer):
     }
 
 
-class TupleTransTest(unittest.TestCase):
-    def test(self):
-        lx = MyLexer()
-        toks = list(lx.get_tokens_unprocessed('abcde'))
-        self.assertEqual(toks, [
-            (0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'),
-            (3, Text.Beer, 'd'), (4, Text.Root, 'e')])
-
-    def test_multiline(self):
-        lx = MyLexer()
-        toks = list(lx.get_tokens_unprocessed('a\ne'))
-        self.assertEqual(toks, [
-            (0, Text.Root, 'a'), (1, Text, u'\n'), (2, Text.Root, 'e')])
-
-    def test_default(self):
-        lx = MyLexer()
-        toks = list(lx.get_tokens_unprocessed('d'))
-        self.assertEqual(toks, [(0, Text.Beer, 'd')])
-
-
-class PopEmptyTest(unittest.TestCase):
-    def test_regular(self):
-        lx = MyLexer()
-        toks = list(lx.get_tokens_unprocessed('#e'))
-        self.assertEqual(toks, [(0, Text.Root, '#'), (1, Text.Root, 'e')])
-
-    def test_tuple(self):
-        lx = MyLexer()
-        toks = list(lx.get_tokens_unprocessed('@e'))
-        self.assertEqual(toks, [(0, Text.Root, '@'), (1, Text.Root, 'e')])
+def test_tuple(lexer):
+    toks = list(lexer.get_tokens_unprocessed('abcde'))
+    assert toks == [
+        (0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'),
+        (3, Text.Beer, 'd'), (4, Text.Root, 'e')]
+
+
+def test_multiline(lexer):
+    toks = list(lexer.get_tokens_unprocessed('a\ne'))
+    assert toks == [
+        (0, Text.Root, 'a'), (1, Text, u'\n'), (2, Text.Root, 'e')]
+
+
+def test_default(lexer):
+    toks = list(lexer.get_tokens_unprocessed('d'))
+    assert toks == [(0, Text.Beer, 'd')]
+
+
+def test_pop_empty_regular(lexer):
+    toks = list(lexer.get_tokens_unprocessed('#e'))
+    assert toks == [(0, Text.Root, '#'), (1, Text.Root, 'e')]
+
+
+def test_pop_empty_tuple(lexer):
+    toks = list(lexer.get_tokens_unprocessed('@e'))
+    assert toks == [(0, Text.Root, '@'), (1, Text.Root, 'e')]
diff --git a/tests/test_regexopt.py b/tests/test_regexopt.py
index 5cfb62a3..20d48dda 100644
--- a/tests/test_regexopt.py
+++ b/tests/test_regexopt.py
@@ -3,108 +3,101 @@
     Tests for pygments.regexopt
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
 import re
 import random
-import unittest
-import itertools
+from itertools import combinations_with_replacement
 
 from pygments.regexopt import regex_opt
 
 ALPHABET = ['a', 'b', 'c', 'd', 'e']
 
-try:
-    from itertools import combinations_with_replacement
-    N_TRIES = 15
-except ImportError:
-    # Python 2.6
-    def combinations_with_replacement(iterable, r):
-        pool = tuple(iterable)
-        n = len(pool)
-        for indices in itertools.product(range(n), repeat=r):
-            if sorted(indices) == list(indices):
-                yield tuple(pool[i] for i in indices)
-    N_TRIES = 9
-
-
-class RegexOptTestCase(unittest.TestCase):
-
-    def generate_keywordlist(self, length):
-        return [''.join(p) for p in
-                combinations_with_replacement(ALPHABET, length)]
-
-    def test_randomly(self):
-        # generate a list of all possible keywords of a certain length using
-        # a restricted alphabet, then choose some to match and make sure only
-        # those do
-        for n in range(3, N_TRIES):
-            kwlist = self.generate_keywordlist(n)
-            to_match = random.sample(kwlist,
-                                     random.randint(1, len(kwlist) - 1))
-            no_match = set(kwlist) - set(to_match)
-            rex = re.compile(regex_opt(to_match))
-            self.assertEqual(rex.groups, 1)
-            for w in to_match:
-                self.assertTrue(rex.match(w))
-            for w in no_match:
-                self.assertFalse(rex.match(w))
-
-    def test_prefix(self):
-        opt = regex_opt(('a', 'b'), prefix=r':{1,2}')
-        print(opt)
-        rex = re.compile(opt)
-        self.assertFalse(rex.match('a'))
-        self.assertTrue(rex.match('::a'))
-        self.assertFalse(rex.match(':::')) # fullmatch
-
-    def test_suffix(self):
-        opt = regex_opt(('a', 'b'), suffix=r':{1,2}')
-        print(opt)
-        rex = re.compile(opt)
-        self.assertFalse(rex.match('a'))
-        self.assertTrue(rex.match('a::'))
-        self.assertFalse(rex.match(':::')) # fullmatch
-
-    def test_suffix_opt(self):
-        # test that detected suffixes remain sorted.
-        opt = regex_opt(('afoo', 'abfoo'))
-        print(opt)
-        rex = re.compile(opt)
-        m = rex.match('abfoo')
-        self.assertEqual(5, m.end())
-
-    def test_different_length_grouping(self):
-        opt = regex_opt(('a', 'xyz'))
-        print(opt)
-        rex = re.compile(opt)
-        self.assertTrue(rex.match('a'))
-        self.assertTrue(rex.match('xyz'))
-        self.assertFalse(rex.match('b'))
-        self.assertEqual(1, rex.groups)
-
-    def test_same_length_grouping(self):
-        opt = regex_opt(('a', 'b'))
-        print(opt)
-        rex = re.compile(opt)
-        self.assertTrue(rex.match('a'))
-        self.assertTrue(rex.match('b'))
-        self.assertFalse(rex.match('x'))
-
-        self.assertEqual(1, rex.groups)
-        groups = rex.match('a').groups()
-        self.assertEqual(('a',), groups)
-
-    def test_same_length_suffix_grouping(self):
-        opt = regex_opt(('a', 'b'), suffix='(m)')
-        print(opt)
-        rex = re.compile(opt)
-        self.assertTrue(rex.match('am'))
-        self.assertTrue(rex.match('bm'))
-        self.assertFalse(rex.match('xm'))
-        self.assertFalse(rex.match('ax'))
-        self.assertEqual(2, rex.groups)
-        groups = rex.match('am').groups()
-        self.assertEqual(('a', 'm'), groups)
+N_TRIES = 15
+
+
+def generate_keywordlist(length):
+    return [''.join(p) for p in
+            combinations_with_replacement(ALPHABET, length)]
+
+
+def test_randomly():
+    # generate a list of all possible keywords of a certain length using
+    # a restricted alphabet, then choose some to match and make sure only
+    # those do
+    for n in range(3, N_TRIES):
+        kwlist = generate_keywordlist(n)
+        to_match = random.sample(kwlist,
+                                 random.randint(1, len(kwlist) - 1))
+        no_match = set(kwlist) - set(to_match)
+        rex = re.compile(regex_opt(to_match))
+        assert rex.groups == 1
+        for w in to_match:
+            assert rex.match(w)
+        for w in no_match:
+            assert not rex.match(w)
+
+
+def test_prefix():
+    opt = regex_opt(('a', 'b'), prefix=r':{1,2}')
+    print(opt)
+    rex = re.compile(opt)
+    assert not rex.match('a')
+    assert rex.match('::a')
+    assert not rex.match(':::')  # fullmatch
+
+
+def test_suffix():
+    opt = regex_opt(('a', 'b'), suffix=r':{1,2}')
+    print(opt)
+    rex = re.compile(opt)
+    assert not rex.match('a')
+    assert rex.match('a::')
+    assert not rex.match(':::')  # fullmatch
+
+
+def test_suffix_opt():
+    # test that detected suffixes remain sorted.
+    opt = regex_opt(('afoo', 'abfoo'))
+    print(opt)
+    rex = re.compile(opt)
+    m = rex.match('abfoo')
+    assert m.end() == 5
+
+
+def test_different_length_grouping():
+    opt = regex_opt(('a', 'xyz'))
+    print(opt)
+    rex = re.compile(opt)
+    assert rex.match('a')
+    assert rex.match('xyz')
+    assert not rex.match('b')
+    assert rex.groups == 1
+
+
+def test_same_length_grouping():
+    opt = regex_opt(('a', 'b'))
+    print(opt)
+    rex = re.compile(opt)
+    assert rex.match('a')
+    assert rex.match('b')
+    assert not rex.match('x')
+
+    assert rex.groups == 1
+    groups = rex.match('a').groups()
+    assert groups == ('a',)
+
+
+def test_same_length_suffix_grouping():
+    opt = regex_opt(('a', 'b'), suffix='(m)')
+    print(opt)
+    rex = re.compile(opt)
+    assert rex.match('am')
+    assert rex.match('bm')
+    assert not rex.match('xm')
+    assert not rex.match('ax')
+    assert rex.groups == 2
+    groups = rex.match('am').groups()
+    assert groups == ('a', 'm')
diff --git a/tests/test_rtf_formatter.py b/tests/test_rtf_formatter.py
index c95bfdaf..35179df4 100644
--- a/tests/test_rtf_formatter.py
+++ b/tests/test_rtf_formatter.py
@@ -1,109 +1,107 @@
 # -*- coding: utf-8 -*-
 """
     Pygments RTF formatter tests
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
-
 from pygments.util import StringIO
 from pygments.formatters import RtfFormatter
 from pygments.lexers.special import TextLexer
 
 
-class RtfFormatterTest(unittest.TestCase):
-    foot = (r'\par' '\n' r'}')
-
-    def _escape(self, string):
-        return(string.replace("\n", r"\n"))
-
-    def _build_message(self, *args, **kwargs):
-        string = kwargs.get('string', None)
-        t = self._escape(kwargs.get('t', ''))
-        expected = self._escape(kwargs.get('expected', ''))
-        result = self._escape(kwargs.get('result', ''))
-
-        if string is None:
-            string = (u"The expected output of '{t}'\n"
-                      u"\t\tShould be '{expected}'\n"
-                      u"\t\tActually outputs '{result}'\n"
-                      u"\t(WARNING: Partial Output of Result!)")
-
-        end = -(len(self._escape(self.foot)))
-        start = end-len(expected)
-
-        return string.format(t=t,
-                             result = result[start:end],
-                             expected = expected)
-
-    def format_rtf(self, t):
-        tokensource = list(TextLexer().get_tokens(t))
-        fmt = RtfFormatter()
-        buf = StringIO()
-        fmt.format(tokensource, buf)
-        result = buf.getvalue()
-        buf.close()
-        return result
-
-    def test_rtf_header(self):
-        t = u''
-        result = self.format_rtf(t)
-        expected = r'{\rtf1\ansi\uc0'
-        msg = (u"RTF documents are expected to start with '{expected}'\n"
-               u"\t\tStarts intead with '{result}'\n"
-               u"\t(WARNING: Partial Output of Result!)".format(
-                   expected = expected,
-                   result = result[:len(expected)]))
-        self.assertTrue(result.startswith(expected), msg)
-
-    def test_rtf_footer(self):
-        t = u''
-        result = self.format_rtf(t)
-        expected = self.foot
-        msg = (u"RTF documents are expected to end with '{expected}'\n"
-               u"\t\tEnds intead with '{result}'\n"
-               u"\t(WARNING: Partial Output of Result!)".format(
-                   expected = self._escape(expected),
-                   result = self._escape(result[-len(expected):])))
-        self.assertTrue(result.endswith(expected), msg)
-
-    def test_ascii_characters(self):
-        t = u'a b c d ~'
-        result = self.format_rtf(t)
-        expected = (r'a b c d ~')
-        if not result.endswith(self.foot):
-            return(unittest.skip('RTF Footer incorrect'))
-        msg = self._build_message(t=t, result=result, expected=expected)
-        self.assertTrue(result.endswith(expected+self.foot), msg)
-
-    def test_escape_characters(self):
-        t = u'\\ {{'
-        result = self.format_rtf(t)
-        expected = r'\\ \{\{'
-        if not result.endswith(self.foot):
-            return(unittest.skip('RTF Footer incorrect'))
-        msg = self._build_message(t=t, result=result, expected=expected)
-        self.assertTrue(result.endswith(expected+self.foot), msg)
-
-    def test_single_characters(self):
-        t = u'â € ¤ каждой'
-        result = self.format_rtf(t)
-        expected = (r'{\u226} {\u8364} {\u164} '
-                    r'{\u1082}{\u1072}{\u1078}{\u1076}{\u1086}{\u1081}')
-        if not result.endswith(self.foot):
-            return(unittest.skip('RTF Footer incorrect'))
-        msg = self._build_message(t=t, result=result, expected=expected)
-        self.assertTrue(result.endswith(expected+self.foot), msg)
-
-    def test_double_characters(self):
-        t = u'က 힣 ↕ ↕︎ 鼖'
-        result = self.format_rtf(t)
-        expected = (r'{\u4096} {\u55203} {\u8597} '
-                    r'{\u8597}{\u65038} {\u55422}{\u56859}')
-        if not result.endswith(self.foot):
-            return(unittest.skip('RTF Footer incorrect'))
-        msg = self._build_message(t=t, result=result, expected=expected)
-        self.assertTrue(result.endswith(expected+self.foot), msg)
+foot = (r'\par' '\n' r'}')
+
+
+def _escape(string):
+    return string.replace("\n", r"\n")
+
+
+def _build_message(*args, **kwargs):
+    string = kwargs.get('string', None)
+    t = _escape(kwargs.get('t', ''))
+    expected = _escape(kwargs.get('expected', ''))
+    result = _escape(kwargs.get('result', ''))
+
+    if string is None:
+        string = (u"The expected output of '{t}'\n"
+                  u"\t\tShould be '{expected}'\n"
+                  u"\t\tActually outputs '{result}'\n"
+                  u"\t(WARNING: Partial Output of Result!)")
+
+    end = -len(_escape(foot))
+    start = end - len(expected)
+
+    return string.format(t=t,
+                         result = result[start:end],
+                         expected = expected)
+
+
+def format_rtf(t):
+    tokensource = list(TextLexer().get_tokens(t))
+    fmt = RtfFormatter()
+    buf = StringIO()
+    fmt.format(tokensource, buf)
+    result = buf.getvalue()
+    buf.close()
+    return result
+
+
+def test_rtf_header():
+    t = u''
+    result = format_rtf(t)
+    expected = r'{\rtf1\ansi\uc0'
+    msg = (u"RTF documents are expected to start with '{expected}'\n"
+           u"\t\tStarts intead with '{result}'\n"
+           u"\t(WARNING: Partial Output of Result!)".format(
+               expected=expected,
+               result=result[:len(expected)]))
+    assert result.startswith(expected), msg
+
+
+def test_rtf_footer():
+    t = u''
+    result = format_rtf(t)
+    expected = foot
+    msg = (u"RTF documents are expected to end with '{expected}'\n"
+           u"\t\tEnds instead with '{result}'\n"
+           u"\t(WARNING: Partial Output of Result!)".format(
+               expected=_escape(expected),
+               result=_escape(result[-len(expected):])))
+    assert result.endswith(expected), msg
+
+
+def test_ascii_characters():
+    t = u'a b c d ~'
+    result = format_rtf(t)
+    expected = (r'a b c d ~')
+    msg = _build_message(t=t, result=result, expected=expected)
+    assert result.endswith(expected+foot), msg
+
+
+def test_escape_characters():
+    t = u'\\ {{'
+    result = format_rtf(t)
+    expected = r'\\ \{\{'
+    msg = _build_message(t=t, result=result, expected=expected)
+    assert result.endswith(expected+foot), msg
+
+
+def test_single_characters():
+    t = u'â € ¤ каждой'
+    result = format_rtf(t)
+    expected = (r'{\u226} {\u8364} {\u164} '
+                r'{\u1082}{\u1072}{\u1078}{\u1076}{\u1086}{\u1081}')
+    msg = _build_message(t=t, result=result, expected=expected)
+    assert result.endswith(expected+foot), msg
+
+
+def test_double_characters():
+    t = u'က 힣 ↕ ↕︎ 鼖'
+    result = format_rtf(t)
+    expected = (r'{\u4096} {\u55203} {\u8597} '
+                r'{\u8597}{\u65038} {\u55422}{\u56859}')
+    msg = _build_message(t=t, result=result, expected=expected)
+    assert result.endswith(expected+foot), msg
diff --git a/tests/test_ruby.py b/tests/test_ruby.py
index b7d4110a..a6da4bf9 100644
--- a/tests/test_ruby.py
+++ b/tests/test_ruby.py
@@ -3,143 +3,147 @@
     Basic RubyLexer Test
     ~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.token import Operator, Number, Text, Token
 from pygments.lexers import RubyLexer
 
 
-class RubyTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = RubyLexer()
-        self.maxDiff = None
-
-    def testRangeSyntax1(self):
-        fragment = u'1..3\n'
-        tokens = [
-            (Number.Integer, u'1'),
-            (Operator, u'..'),
-            (Number.Integer, u'3'),
-            (Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testRangeSyntax2(self):
-        fragment = u'1...3\n'
-        tokens = [
-            (Number.Integer, u'1'),
-            (Operator, u'...'),
-            (Number.Integer, u'3'),
-            (Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testRangeSyntax3(self):
-        fragment = u'1 .. 3\n'
-        tokens = [
-            (Number.Integer, u'1'),
-            (Text, u' '),
-            (Operator, u'..'),
-            (Text, u' '),
-            (Number.Integer, u'3'),
-            (Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testInterpolationNestedCurly(self):
-        fragment = (
-            u'"A#{ (3..5).group_by { |x| x/2}.map '
-            u'do |k,v| "#{k}" end.join }" + "Z"\n')
-
-        tokens = [
-            (Token.Literal.String.Double, u'"'),
-            (Token.Literal.String.Double, u'A'),
-            (Token.Literal.String.Interpol, u'#{'),
-            (Token.Text, u' '),
-            (Token.Punctuation, u'('),
-            (Token.Literal.Number.Integer, u'3'),
-            (Token.Operator, u'..'),
-            (Token.Literal.Number.Integer, u'5'),
-            (Token.Punctuation, u')'),
-            (Token.Operator, u'.'),
-            (Token.Name, u'group_by'),
-            (Token.Text, u' '),
-            (Token.Literal.String.Interpol, u'{'),
-            (Token.Text, u' '),
-            (Token.Operator, u'|'),
-            (Token.Name, u'x'),
-            (Token.Operator, u'|'),
-            (Token.Text, u' '),
-            (Token.Name, u'x'),
-            (Token.Operator, u'/'),
-            (Token.Literal.Number.Integer, u'2'),
-            (Token.Literal.String.Interpol, u'}'),
-            (Token.Operator, u'.'),
-            (Token.Name, u'map'),
-            (Token.Text, u' '),
-            (Token.Keyword, u'do'),
-            (Token.Text, u' '),
-            (Token.Operator, u'|'),
-            (Token.Name, u'k'),
-            (Token.Punctuation, u','),
-            (Token.Name, u'v'),
-            (Token.Operator, u'|'),
-            (Token.Text, u' '),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Literal.String.Interpol, u'#{'),
-            (Token.Name, u'k'),
-            (Token.Literal.String.Interpol, u'}'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u' '),
-            (Token.Keyword, u'end'),
-            (Token.Operator, u'.'),
-            (Token.Name, u'join'),
-            (Token.Text, u' '),
-            (Token.Literal.String.Interpol, u'}'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u' '),
-            (Token.Operator, u'+'),
-            (Token.Text, u' '),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Literal.String.Double, u'Z'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testOperatorMethods(self):
-        fragment = u'x.==4\n'
-        tokens = [
-            (Token.Name, u'x'),
-            (Token.Operator, u'.'),
-            (Token.Name.Operator, u'=='),
-            (Token.Literal.Number.Integer, u'4'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testEscapedBracestring(self):
-        fragment = u'str.gsub(%r{\\\\\\\\}, "/")\n'
-        tokens = [
-            (Token.Name, u'str'),
-            (Token.Operator, u'.'),
-            (Token.Name, u'gsub'),
-            (Token.Punctuation, u'('),
-            (Token.Literal.String.Regex, u'%r{'),
-            (Token.Literal.String.Regex, u'\\\\'),
-            (Token.Literal.String.Regex, u'\\\\'),
-            (Token.Literal.String.Regex, u'}'),
-            (Token.Punctuation, u','),
-            (Token.Text, u' '),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Literal.String.Double, u'/'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Punctuation, u')'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+@pytest.fixture(scope='module')
+def lexer():
+    yield RubyLexer()
+
+
+def test_range_syntax1(lexer):
+    fragment = u'1..3\n'
+    tokens = [
+        (Number.Integer, u'1'),
+        (Operator, u'..'),
+        (Number.Integer, u'3'),
+        (Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_range_syntax2(lexer):
+    fragment = u'1...3\n'
+    tokens = [
+        (Number.Integer, u'1'),
+        (Operator, u'...'),
+        (Number.Integer, u'3'),
+        (Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_range_syntax3(lexer):
+    fragment = u'1 .. 3\n'
+    tokens = [
+        (Number.Integer, u'1'),
+        (Text, u' '),
+        (Operator, u'..'),
+        (Text, u' '),
+        (Number.Integer, u'3'),
+        (Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_interpolation_nested_curly(lexer):
+    fragment = (
+        u'"A#{ (3..5).group_by { |x| x/2}.map '
+        u'do |k,v| "#{k}" end.join }" + "Z"\n')
+
+    tokens = [
+        (Token.Literal.String.Double, u'"'),
+        (Token.Literal.String.Double, u'A'),
+        (Token.Literal.String.Interpol, u'#{'),
+        (Token.Text, u' '),
+        (Token.Punctuation, u'('),
+        (Token.Literal.Number.Integer, u'3'),
+        (Token.Operator, u'..'),
+        (Token.Literal.Number.Integer, u'5'),
+        (Token.Punctuation, u')'),
+        (Token.Operator, u'.'),
+        (Token.Name, u'group_by'),
+        (Token.Text, u' '),
+        (Token.Literal.String.Interpol, u'{'),
+        (Token.Text, u' '),
+        (Token.Operator, u'|'),
+        (Token.Name, u'x'),
+        (Token.Operator, u'|'),
+        (Token.Text, u' '),
+        (Token.Name, u'x'),
+        (Token.Operator, u'/'),
+        (Token.Literal.Number.Integer, u'2'),
+        (Token.Literal.String.Interpol, u'}'),
+        (Token.Operator, u'.'),
+        (Token.Name, u'map'),
+        (Token.Text, u' '),
+        (Token.Keyword, u'do'),
+        (Token.Text, u' '),
+        (Token.Operator, u'|'),
+        (Token.Name, u'k'),
+        (Token.Punctuation, u','),
+        (Token.Name, u'v'),
+        (Token.Operator, u'|'),
+        (Token.Text, u' '),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Literal.String.Interpol, u'#{'),
+        (Token.Name, u'k'),
+        (Token.Literal.String.Interpol, u'}'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u' '),
+        (Token.Keyword, u'end'),
+        (Token.Operator, u'.'),
+        (Token.Name, u'join'),
+        (Token.Text, u' '),
+        (Token.Literal.String.Interpol, u'}'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u' '),
+        (Token.Operator, u'+'),
+        (Token.Text, u' '),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Literal.String.Double, u'Z'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_operator_methods(lexer):
+    fragment = u'x.==4\n'
+    tokens = [
+        (Token.Name, u'x'),
+        (Token.Operator, u'.'),
+        (Token.Name.Operator, u'=='),
+        (Token.Literal.Number.Integer, u'4'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_escaped_bracestring(lexer):
+    fragment = u'str.gsub(%r{\\\\\\\\}, "/")\n'
+    tokens = [
+        (Token.Name, u'str'),
+        (Token.Operator, u'.'),
+        (Token.Name, u'gsub'),
+        (Token.Punctuation, u'('),
+        (Token.Literal.String.Regex, u'%r{'),
+        (Token.Literal.String.Regex, u'\\\\'),
+        (Token.Literal.String.Regex, u'\\\\'),
+        (Token.Literal.String.Regex, u'}'),
+        (Token.Punctuation, u','),
+        (Token.Text, u' '),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Literal.String.Double, u'/'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Punctuation, u')'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_shell.py b/tests/test_shell.py
index e283793e..b52f17fb 100644
--- a/tests/test_shell.py
+++ b/tests/test_shell.py
@@ -3,140 +3,143 @@
     Basic Shell Tests
     ~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.token import Token
 from pygments.lexers import BashLexer, BashSessionLexer
 
 
-class BashTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = BashLexer()
-        self.maxDiff = None
-
-    def testCurlyNoEscapeAndQuotes(self):
-        fragment = u'echo "${a//["b"]/}"\n'
-        tokens = [
-            (Token.Name.Builtin, u'echo'),
-            (Token.Text, u' '),
-            (Token.Literal.String.Double, u'"'),
-            (Token.String.Interpol, u'${'),
-            (Token.Name.Variable, u'a'),
-            (Token.Punctuation, u'//['),
-            (Token.Literal.String.Double, u'"b"'),
-            (Token.Punctuation, u']/'),
-            (Token.String.Interpol, u'}'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testCurlyWithEscape(self):
-        fragment = u'echo ${a//[\\"]/}\n'
-        tokens = [
-            (Token.Name.Builtin, u'echo'),
-            (Token.Text, u' '),
-            (Token.String.Interpol, u'${'),
-            (Token.Name.Variable, u'a'),
-            (Token.Punctuation, u'//['),
-            (Token.Literal.String.Escape, u'\\"'),
-            (Token.Punctuation, u']/'),
-            (Token.String.Interpol, u'}'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testParsedSingle(self):
-        fragment = u"a=$'abc\\''\n"
-        tokens = [
-            (Token.Name.Variable, u'a'),
-            (Token.Operator, u'='),
-            (Token.Literal.String.Single, u"$'abc\\''"),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testShortVariableNames(self):
-        fragment = u'x="$"\ny="$_"\nz="$abc"\n'
-        tokens = [
-            # single lone $
-            (Token.Name.Variable, u'x'),
-            (Token.Operator, u'='),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u'$'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u'\n'),
-            # single letter shell var
-            (Token.Name.Variable, u'y'),
-            (Token.Operator, u'='),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Name.Variable, u'$_'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u'\n'),
-            # multi-letter user var
-            (Token.Name.Variable, u'z'),
-            (Token.Operator, u'='),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Name.Variable, u'$abc'),
-            (Token.Literal.String.Double, u'"'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testArrayNums(self):
-        fragment = u'a=(1 2 3)\n'
-        tokens = [
-            (Token.Name.Variable, u'a'),
-            (Token.Operator, u'='),
-            (Token.Operator, u'('),
-            (Token.Literal.Number, u'1'),
-            (Token.Text, u' '),
-            (Token.Literal.Number, u'2'),
-            (Token.Text, u' '),
-            (Token.Literal.Number, u'3'),
-            (Token.Operator, u')'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-    def testEndOfLineNums(self):
-        fragment = u'a=1\nb=2 # comment\n'
-        tokens = [
-            (Token.Name.Variable, u'a'),
-            (Token.Operator, u'='),
-            (Token.Literal.Number, u'1'),
-            (Token.Text, u'\n'),
-            (Token.Name.Variable, u'b'),
-            (Token.Operator, u'='),
-            (Token.Literal.Number, u'2'),
-            (Token.Text, u' '),
-            (Token.Comment.Single, u'# comment\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
-class BashSessionTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = BashSessionLexer()
-        self.maxDiff = None
-
-    def testNeedsName(self):
-        fragment = u'$ echo \\\nhi\nhi\n'
-        tokens = [
-            (Token.Text, u''),
-            (Token.Generic.Prompt, u'$'),
-            (Token.Text, u' '),
-            (Token.Name.Builtin, u'echo'),
-            (Token.Text, u' '),
-            (Token.Literal.String.Escape, u'\\\n'),
-            (Token.Text, u'hi'),
-            (Token.Text, u'\n'),
-            (Token.Generic.Output, u'hi\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
-
+@pytest.fixture(scope='module')
+def lexer():
+    yield BashLexer()
+
+
+@pytest.fixture(scope='module')
+def lexer_session():
+    yield BashSessionLexer()
+
+
+def test_curly_no_escape_and_quotes(lexer):
+    fragment = u'echo "${a//["b"]/}"\n'
+    tokens = [
+        (Token.Name.Builtin, u'echo'),
+        (Token.Text, u' '),
+        (Token.Literal.String.Double, u'"'),
+        (Token.String.Interpol, u'${'),
+        (Token.Name.Variable, u'a'),
+        (Token.Punctuation, u'//['),
+        (Token.Literal.String.Double, u'"b"'),
+        (Token.Punctuation, u']/'),
+        (Token.String.Interpol, u'}'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_curly_with_escape(lexer):
+    fragment = u'echo ${a//[\\"]/}\n'
+    tokens = [
+        (Token.Name.Builtin, u'echo'),
+        (Token.Text, u' '),
+        (Token.String.Interpol, u'${'),
+        (Token.Name.Variable, u'a'),
+        (Token.Punctuation, u'//['),
+        (Token.Literal.String.Escape, u'\\"'),
+        (Token.Punctuation, u']/'),
+        (Token.String.Interpol, u'}'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_parsed_single(lexer):
+    fragment = u"a=$'abc\\''\n"
+    tokens = [
+        (Token.Name.Variable, u'a'),
+        (Token.Operator, u'='),
+        (Token.Literal.String.Single, u"$'abc\\''"),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_short_variable_names(lexer):
+    fragment = u'x="$"\ny="$_"\nz="$abc"\n'
+    tokens = [
+        # single lone $
+        (Token.Name.Variable, u'x'),
+        (Token.Operator, u'='),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u'$'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u'\n'),
+        # single letter shell var
+        (Token.Name.Variable, u'y'),
+        (Token.Operator, u'='),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Name.Variable, u'$_'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u'\n'),
+        # multi-letter user var
+        (Token.Name.Variable, u'z'),
+        (Token.Operator, u'='),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Name.Variable, u'$abc'),
+        (Token.Literal.String.Double, u'"'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_array_nums(lexer):
+    fragment = u'a=(1 2 3)\n'
+    tokens = [
+        (Token.Name.Variable, u'a'),
+        (Token.Operator, u'='),
+        (Token.Operator, u'('),
+        (Token.Literal.Number, u'1'),
+        (Token.Text, u' '),
+        (Token.Literal.Number, u'2'),
+        (Token.Text, u' '),
+        (Token.Literal.Number, u'3'),
+        (Token.Operator, u')'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_end_of_line_nums(lexer):
+    fragment = u'a=1\nb=2 # comment\n'
+    tokens = [
+        (Token.Name.Variable, u'a'),
+        (Token.Operator, u'='),
+        (Token.Literal.Number, u'1'),
+        (Token.Text, u'\n'),
+        (Token.Name.Variable, u'b'),
+        (Token.Operator, u'='),
+        (Token.Literal.Number, u'2'),
+        (Token.Text, u' '),
+        (Token.Comment.Single, u'# comment\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
+
+
+def test_needs_name(lexer_session):
+    fragment = u'$ echo \\\nhi\nhi\n'
+    tokens = [
+        (Token.Text, u''),
+        (Token.Generic.Prompt, u'$'),
+        (Token.Text, u' '),
+        (Token.Name.Builtin, u'echo'),
+        (Token.Text, u' '),
+        (Token.Literal.String.Escape, u'\\\n'),
+        (Token.Text, u'hi'),
+        (Token.Text, u'\n'),
+        (Token.Generic.Output, u'hi\n'),
+    ]
+    assert list(lexer_session.get_tokens(fragment)) == tokens
diff --git a/tests/test_smarty.py b/tests/test_smarty.py
index a9bce11c..2d172559 100644
--- a/tests/test_smarty.py
+++ b/tests/test_smarty.py
@@ -1,39 +1,39 @@
 # -*- coding: utf-8 -*-
 """
     Basic SmartyLexer Test
-    ~~~~~~~~~~~~~~~~~~~~
+    ~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.token import Token
 from pygments.lexers import SmartyLexer
 
 
-class SmartyTest(unittest.TestCase):
+@pytest.fixture(scope='module')
+def lexer():
+    yield SmartyLexer()
 
-    def setUp(self):
-        self.lexer = SmartyLexer()
 
-    def testNestedCurly(self):
-        fragment = u'{templateFunction param={anotherFunction} param2=$something}\n'
-        tokens = [
-            (Token.Comment.Preproc, u'{'),
-            (Token.Name.Function, u'templateFunction'),
-            (Token.Text, u' '),
-            (Token.Name.Attribute, u'param'),
-            (Token.Operator, u'='),
-            (Token.Comment.Preproc, u'{'),
-            (Token.Name.Attribute, u'anotherFunction'),
-            (Token.Comment.Preproc, u'}'),
-            (Token.Text, u' '),
-            (Token.Name.Attribute, u'param2'),
-            (Token.Operator, u'='),
-            (Token.Name.Variable, u'$something'),
-            (Token.Comment.Preproc, u'}'),
-            (Token.Other, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+def test_nested_curly(lexer):
+    fragment = u'{templateFunction param={anotherFunction} param2=$something}\n'
+    tokens = [
+        (Token.Comment.Preproc, u'{'),
+        (Token.Name.Function, u'templateFunction'),
+        (Token.Text, u' '),
+        (Token.Name.Attribute, u'param'),
+        (Token.Operator, u'='),
+        (Token.Comment.Preproc, u'{'),
+        (Token.Name.Attribute, u'anotherFunction'),
+        (Token.Comment.Preproc, u'}'),
+        (Token.Text, u' '),
+        (Token.Name.Attribute, u'param2'),
+        (Token.Operator, u'='),
+        (Token.Name.Variable, u'$something'),
+        (Token.Comment.Preproc, u'}'),
+        (Token.Other, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tests/test_sql.py b/tests/test_sql.py
index 6be34006..efd63be6 100644
--- a/tests/test_sql.py
+++ b/tests/test_sql.py
@@ -3,10 +3,11 @@
     Pygments SQL lexers tests
     ~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
-import unittest
+
+import pytest
 
 from pygments.lexers.sql import name_between_bracket_re, \
     name_between_backtick_re, tsql_go_re, tsql_declare_re, \
@@ -15,104 +16,102 @@ from pygments.lexers.sql import name_between_bracket_re, \
 from pygments.token import Comment, Name, Number, Punctuation, Whitespace
 
 
-class TransactSqlLexerTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = TransactSqlLexer()
-
-    def _assertAreTokensOfType(self, examples, expected_token_type):
-        for test_number, example in enumerate(examples.split(), 1):
-            token_count = 0
-            for token_type, token_value in self.lexer.get_tokens(example):
-                if token_type != Whitespace:
-                    token_count += 1
-                    self.assertEqual(
-                        token_type, expected_token_type,
-                        'token_type #%d for %s is be %s but must be %s' %
-                        (test_number, token_value, token_type, expected_token_type))
-            self.assertEqual(
-                token_count, 1,
-                '%s must yield exactly 1 token instead of %d' %
-                (example, token_count))
-
-    def _assertTokensMatch(self, text, expected_tokens_without_trailing_newline):
-        actual_tokens = tuple(self.lexer.get_tokens(text))
-        if (len(actual_tokens) >= 1) and (actual_tokens[-1] == (Whitespace, '\n')):
-            actual_tokens = tuple(actual_tokens[:-1])
-        self.assertEqual(
-            expected_tokens_without_trailing_newline, actual_tokens,
-            'text must yield expected tokens: %s' % text)
-
-    def test_can_lex_float(self):
-        self._assertAreTokensOfType(
-            '1. 1.e1 .1 1.2 1.2e3 1.2e+3 1.2e-3 1e2', Number.Float)
-        self._assertTokensMatch(
-            '1e2.1e2',
-            ((Number.Float, '1e2'), (Number.Float, '.1e2'))
+@pytest.fixture(scope='module')
+def lexer():
+    yield TransactSqlLexer()
+
+
+def _assert_are_tokens_of_type(lexer, examples, expected_token_type):
+    for test_number, example in enumerate(examples.split(), 1):
+        token_count = 0
+        for token_type, token_value in lexer.get_tokens(example):
+            if token_type != Whitespace:
+                token_count += 1
+                assert token_type == expected_token_type, \
+                    'token_type #%d for %s is %s but must be %s' % \
+                    (test_number, token_value, token_type, expected_token_type)
+        assert token_count == 1, \
+            '%s must yield exactly 1 token instead of %d' % \
+            (example, token_count)
+
+
+def _assert_tokens_match(lexer, text, expected_tokens_without_trailing_newline):
+    actual_tokens = tuple(lexer.get_tokens(text))
+    if (len(actual_tokens) >= 1) and (actual_tokens[-1] == (Whitespace, '\n')):
+        actual_tokens = tuple(actual_tokens[:-1])
+    assert expected_tokens_without_trailing_newline == actual_tokens, \
+        'text must yield expected tokens: %s' % text
+
+
+def test_can_lex_float(lexer):
+    _assert_are_tokens_of_type(lexer,
+                               '1. 1.e1 .1 1.2 1.2e3 1.2e+3 1.2e-3 1e2',
+                               Number.Float)
+    _assert_tokens_match(lexer,
+                         '1e2.1e2',
+                         ((Number.Float, '1e2'), (Number.Float, '.1e2')))
+
+
+def test_can_reject_almost_float(lexer):
+    _assert_tokens_match(lexer, '.e1', ((Punctuation, '.'), (Name, 'e1')))
+
+
+def test_can_lex_integer(lexer):
+    _assert_are_tokens_of_type(lexer, '1 23 456', Number.Integer)
+
+
+def test_can_lex_names(lexer):
+    _assert_are_tokens_of_type(lexer,
+                               u'thingy thingy123 _thingy _ _123 Ähnliches Müll #temp1 ##temp2',
+                               Name)
+
+
+def test_can_lex_comments(lexer):
+    _assert_tokens_match(lexer, '--\n', ((Comment.Single, '--\n'),))
+    _assert_tokens_match(lexer, '/**/', (
+        (Comment.Multiline, '/*'), (Comment.Multiline, '*/')
+    ))
+    _assert_tokens_match(lexer, '/*/**/*/', (
+        (Comment.Multiline, '/*'),
+        (Comment.Multiline, '/*'),
+        (Comment.Multiline, '*/'),
+        (Comment.Multiline, '*/'),
+    ))
+
+
+def test_can_match_analyze_text_res():
+    assert ['`a`', '`bc`'] == \
+        name_between_backtick_re.findall('select `a`, `bc` from some')
+    assert ['[a]', '[bc]'] == \
+        name_between_bracket_re.findall('select [a], [bc] from some')
+    assert tsql_declare_re.search('--\nDeClaRe @some int;')
+    assert tsql_go_re.search('select 1\ngo\n--')
+    assert tsql_variable_re.search('create procedure dbo.usp_x @a int, @b int')
+
+
+def test_can_analyze_text():
+    mysql_lexer = MySqlLexer()
+    sql_lexer = SqlLexer()
+    tsql_lexer = TransactSqlLexer()
+    code_to_expected_lexer_map = {
+        'select `a`, `bc` from some': mysql_lexer,
+        'select a, bc from some': sql_lexer,
+        'select [a], [bc] from some': tsql_lexer,
+        '-- `a`, `bc`\nselect [a], [bc] from some': tsql_lexer,
+        '-- `a`, `bc`\nselect [a], [bc] from some; go': tsql_lexer,
+    }
+    sql_lexers = set(code_to_expected_lexer_map.values())
+    for code, expected_lexer in code_to_expected_lexer_map.items():
+        ratings_and_lexers = list((lexer.analyse_text(code), lexer.name) for lexer in sql_lexers)
+        best_rating, best_lexer_name = sorted(ratings_and_lexers, reverse=True)[0]
+        expected_rating = expected_lexer.analyse_text(code)
+        message = (
+            'lexer must be %s (rating %.2f) instead of '
+            '%s (rating %.2f) for analyse_text() on code:\n%s') % (
+            expected_lexer.name,
+            expected_rating,
+            best_lexer_name,
+            best_rating,
+            code
         )
-
-    def test_can_reject_almost_float(self):
-        self._assertTokensMatch(
-            '.e1',
-            ((Punctuation, '.'), (Name, 'e1')))
-
-    def test_can_lex_integer(self):
-        self._assertAreTokensOfType(
-            '1 23 456', Number.Integer)
-
-    def test_can_lex_names(self):
-        self._assertAreTokensOfType(
-            u'thingy thingy123 _thingy _ _123 Ähnliches Müll #temp1 ##temp2', Name)
-
-    def test_can_lex_comments(self):
-        self._assertTokensMatch('--\n', ((Comment.Single, '--\n'),))
-        self._assertTokensMatch('/**/', (
-            (Comment.Multiline, '/*'), (Comment.Multiline, '*/')
-        ))
-        self._assertTokensMatch('/*/**/*/', (
-            (Comment.Multiline, '/*'),
-            (Comment.Multiline, '/*'),
-            (Comment.Multiline, '*/'),
-            (Comment.Multiline, '*/'),
-        ))
-
-
-class SqlAnalyzeTextTest(unittest.TestCase):
-    def test_can_match_analyze_text_res(self):
-        self.assertEqual(['`a`', '`bc`'],
-            name_between_backtick_re.findall('select `a`, `bc` from some'))
-        self.assertEqual(['[a]', '[bc]'],
-            name_between_bracket_re.findall('select [a], [bc] from some'))
-        self.assertTrue(tsql_declare_re.search('--\nDeClaRe @some int;'))
-        self.assertTrue(tsql_go_re.search('select 1\ngo\n--'))
-        self.assertTrue(tsql_variable_re.search(
-            'create procedure dbo.usp_x @a int, @b int'))
-
-    def test_can_analyze_text(self):
-        mysql_lexer = MySqlLexer()
-        sql_lexer = SqlLexer()
-        tsql_lexer = TransactSqlLexer()
-        code_to_expected_lexer_map = {
-            'select `a`, `bc` from some': mysql_lexer,
-            'select a, bc from some': sql_lexer,
-            'select [a], [bc] from some': tsql_lexer,
-            '-- `a`, `bc`\nselect [a], [bc] from some': tsql_lexer,
-            '-- `a`, `bc`\nselect [a], [bc] from some; go': tsql_lexer,
-        }
-        sql_lexers = set(code_to_expected_lexer_map.values())
-        for code, expected_lexer in code_to_expected_lexer_map.items():
-            ratings_and_lexers = list((lexer.analyse_text(code), lexer.name) for lexer in sql_lexers)
-            best_rating, best_lexer_name  = sorted(ratings_and_lexers, reverse=True)[0]
-            expected_rating = expected_lexer.analyse_text(code)
-            message = (
-                'lexer must be %s (rating %.2f) instead of '
-                '%s (rating %.2f) for analyse_text() on code:\n%s') % (
-                expected_lexer.name,
-                expected_rating,
-                best_lexer_name,
-                best_rating,
-                code
-            )
-            self.assertEqual(
-                expected_lexer.name, best_lexer_name, message
-            )
+        assert expected_lexer.name == best_lexer_name, message
diff --git a/tests/test_terminal_formatter.py b/tests/test_terminal_formatter.py
index 1f44807d..91ad8937 100644
--- a/tests/test_terminal_formatter.py
+++ b/tests/test_terminal_formatter.py
@@ -3,13 +3,12 @@
     Pygments terminal formatter tests
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
 from __future__ import print_function
 
-import unittest
 import re
 
 from pygments.util import StringIO
@@ -37,26 +36,26 @@ def strip_ansi(x):
     return ANSI_RE.sub('', x)
 
 
-class TerminalFormatterTest(unittest.TestCase):
-    def test_reasonable_output(self):
-        out = StringIO()
-        TerminalFormatter().format(DEMO_TOKENS, out)
-        plain = strip_ansi(out.getvalue())
-        self.assertEqual(DEMO_TEXT.count('\n'), plain.count('\n'))
-        print(repr(plain))
+def test_reasonable_output():
+    out = StringIO()
+    TerminalFormatter().format(DEMO_TOKENS, out)
+    plain = strip_ansi(out.getvalue())
+    assert DEMO_TEXT.count('\n') == plain.count('\n')
+    print(repr(plain))
 
-        for a, b in zip(DEMO_TEXT.splitlines(), plain.splitlines()):
-            self.assertEqual(a, b)
+    for a, b in zip(DEMO_TEXT.splitlines(), plain.splitlines()):
+        assert a == b
 
-    def test_reasonable_output_lineno(self):
-        out = StringIO()
-        TerminalFormatter(linenos=True).format(DEMO_TOKENS, out)
-        plain = strip_ansi(out.getvalue())
-        self.assertEqual(DEMO_TEXT.count('\n') + 1, plain.count('\n'))
-        print(repr(plain))
 
-        for a, b in zip(DEMO_TEXT.splitlines(), plain.splitlines()):
-            self.assertTrue(a in b)
+def test_reasonable_output_lineno():
+    out = StringIO()
+    TerminalFormatter(linenos=True).format(DEMO_TOKENS, out)
+    plain = strip_ansi(out.getvalue())
+    assert DEMO_TEXT.count('\n') + 1 == plain.count('\n')
+    print(repr(plain))
+
+    for a, b in zip(DEMO_TEXT.splitlines(), plain.splitlines()):
+        assert a in b
 
 
 class MyStyle(Style):
@@ -68,8 +67,7 @@ class MyStyle(Style):
     }
 
 
-class Terminal256FormatterTest(unittest.TestCase):
-    code = '''
+CODE = '''
 # this should be a comment
 print("Hello World")
 async def function(a,b,c, *d, **kwarg:Bool)->Bool:
@@ -78,25 +76,27 @@ async def function(a,b,c, *d, **kwarg:Bool)->Bool:
 
 '''
 
-    def test_style_html(self):
-        style = HtmlFormatter(style=MyStyle).get_style_defs()
-        self.assertTrue('#555555' in style,
-                        "ansigray for comment not html css style")
-
-    def test_others_work(self):
-        """check other formatters don't crash"""
-        highlight(self.code, Python3Lexer(), LatexFormatter(style=MyStyle))
-        highlight(self.code, Python3Lexer(), HtmlFormatter(style=MyStyle))
-
-    def test_256esc_seq(self):
-        """
-        test that a few escape sequences are actualy used when using ansi<> color codes
-        """
-        def termtest(x):
-            return highlight(x, Python3Lexer(),
-                             Terminal256Formatter(style=MyStyle))
-
-        self.assertTrue('32;41' in termtest('0x123'))
-        self.assertTrue('32;42' in termtest('123'))
-        self.assertTrue('30;01' in termtest('#comment'))
-        self.assertTrue('34;41' in termtest('"String"'))
+
+def test_style_html():
+    style = HtmlFormatter(style=MyStyle).get_style_defs()
+    assert '#555555' in style, "ansigray for comment not html css style"
+
+
+def test_others_work():
+    """check other formatters don't crash"""
+    highlight(CODE, Python3Lexer(), LatexFormatter(style=MyStyle))
+    highlight(CODE, Python3Lexer(), HtmlFormatter(style=MyStyle))
+
+
+def test_256esc_seq():
+    """
+    test that a few escape sequences are actually used when using ansi<> color codes
+    """
+    def termtest(x):
+        return highlight(x, Python3Lexer(),
+                         Terminal256Formatter(style=MyStyle))
+
+    assert '32;41' in termtest('0x123')
+    assert '32;42' in termtest('123')
+    assert '30;01' in termtest('#comment')
+    assert '34;41' in termtest('"String"')
diff --git a/tests/test_textfmts.py b/tests/test_textfmts.py
index 57c2b61f..5f369007 100644
--- a/tests/test_textfmts.py
+++ b/tests/test_textfmts.py
@@ -3,38 +3,36 @@
     Basic Tests for textfmts
     ~~~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.token import Token
 from pygments.lexers.textfmts import HttpLexer
 
 
-class RubyTest(unittest.TestCase):
-
-    def setUp(self):
-        self.lexer = HttpLexer()
-        self.maxDiff = None
-
-    def testApplicationXml(self):
-        fragment = u'GET / HTTP/1.0\nContent-Type: application/xml\n\n\n'
-        tokens = [
-            (Token.Name.Tag, u''),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(
-            tokens, list(self.lexer.get_tokens(fragment))[-len(tokens):])
-
-    def testApplicationCalendarXml(self):
-        fragment = u'GET / HTTP/1.0\nContent-Type: application/calendar+xml\n\n\n'
-        tokens = [
-            (Token.Name.Tag, u''),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(
-            tokens, list(self.lexer.get_tokens(fragment))[-len(tokens):])
+@pytest.fixture(scope='module')
+def lexer():
+    yield HttpLexer()
+
+
+def test_application_xml(lexer):
+    fragment = u'GET / HTTP/1.0\nContent-Type: application/xml\n\n\n'
+    tokens = [
+        (Token.Name.Tag, u''),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment))[-len(tokens):] == tokens
+
+
+def test_application_calendar_xml(lexer):
+    fragment = u'GET / HTTP/1.0\nContent-Type: application/calendar+xml\n\n\n'
+    tokens = [
+        (Token.Name.Tag, u''),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment))[-len(tokens):] == tokens
diff --git a/tests/test_token.py b/tests/test_token.py
index 94522373..11e4d375 100644
--- a/tests/test_token.py
+++ b/tests/test_token.py
@@ -3,52 +3,50 @@
     Test suite for the token module
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
 import copy
-import unittest
 
-from pygments import token
+import pytest
 
+from pygments import token
 
-class TokenTest(unittest.TestCase):
 
-    def test_tokentype(self):
-        e = self.assertEqual
+def test_tokentype():
+    t = token.String
+    assert t.split() == [token.Token, token.Literal, token.String]
+    assert t.__class__ is token._TokenType
 
-        t = token.String
 
-        e(t.split(), [token.Token, token.Literal, token.String])
+def test_functions():
+    assert token.is_token_subtype(token.String, token.String)
+    assert token.is_token_subtype(token.String, token.Literal)
+    assert not token.is_token_subtype(token.Literal, token.String)
 
-        e(t.__class__, token._TokenType)
+    assert token.string_to_tokentype(token.String) is token.String
+    assert token.string_to_tokentype('') is token.Token
+    assert token.string_to_tokentype('String') is token.String
 
-    def test_functions(self):
-        self.assertTrue(token.is_token_subtype(token.String, token.String))
-        self.assertTrue(token.is_token_subtype(token.String, token.Literal))
-        self.assertFalse(token.is_token_subtype(token.Literal, token.String))
 
-        self.assertTrue(token.string_to_tokentype(token.String) is token.String)
-        self.assertTrue(token.string_to_tokentype('') is token.Token)
-        self.assertTrue(token.string_to_tokentype('String') is token.String)
+def test_sanity_check():
+    stp = token.STANDARD_TYPES.copy()
+    stp[token.Token] = '---'  # Token and Text do conflict, that is okay
+    t = {}
+    for k, v in stp.items():
+        t.setdefault(v, []).append(k)
+    if len(t) == len(stp):
+        return  # Okay
 
-    def test_sanity_check(self):
-        stp = token.STANDARD_TYPES.copy()
-        stp[token.Token] = '---' # Token and Text do conflict, that is okay
-        t = {}
-        for k, v in stp.items():
-            t.setdefault(v, []).append(k)
-        if len(t) == len(stp):
-            return # Okay
+    for k, v in t.items():
+        if len(v) > 1:
+            pytest.fail("%r has more than one key: %r" % (k, v))
 
-        for k, v in t.items():
-            if len(v) > 1:
-                self.fail("%r has more than one key: %r" % (k, v))
 
-    def test_copying(self):
-        # Token instances are supposed to be singletons, so copying or even
-        # deepcopying should return themselves
-        t = token.String
-        self.assertIs(t, copy.copy(t))
-        self.assertIs(t, copy.deepcopy(t))
+def test_copying():
+    # Token instances are supposed to be singletons, so copying or even
+    # deepcopying should return themselves
+    t = token.String
+    assert t is copy.copy(t)
+    assert t is copy.deepcopy(t)
diff --git a/tests/test_unistring.py b/tests/test_unistring.py
index c56b68c7..a4b58827 100644
--- a/tests/test_unistring.py
+++ b/tests/test_unistring.py
@@ -3,46 +3,45 @@
     Test suite for the unistring module
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
 import re
-import unittest
 import random
 
 from pygments import unistring as uni
 from pygments.util import unichr
 
 
-class UnistringTest(unittest.TestCase):
-    def test_cats_exist_and_compilable(self):
-        for cat in uni.cats:
-            s = getattr(uni, cat)
-            if s == '':  # Probably Cs on Jython
-                continue
-            print("%s %r" % (cat, s))
-            re.compile('[%s]' % s)
-
-    def _cats_that_match(self, c):
-        matching_cats = []
-        for cat in uni.cats:
-            s = getattr(uni, cat)
-            if s == '':  # Probably Cs on Jython
-                continue
-            if re.compile('[%s]' % s).match(c):
-                matching_cats.append(cat)
-        return matching_cats
-
-    def test_spot_check_types(self):
-        # Each char should match one, and precisely one, category
-        random.seed(0)
-        for i in range(1000):
-            o = random.randint(0, 65535)
-            c = unichr(o)
-            if o > 0xd800 and o <= 0xdfff and not uni.Cs:
-                continue  # Bah, Jython.
-            print(hex(o))
-            cats = self._cats_that_match(c)
-            self.assertEqual(len(cats), 1,
-                             "%d (%s): %s" % (o, c, cats))
+def test_cats_exist_and_compilable():
+    for cat in uni.cats:
+        s = getattr(uni, cat)
+        if s == '':  # Probably Cs on Jython
+            continue
+        print("%s %r" % (cat, s))
+        re.compile('[%s]' % s)
+
+
+def _cats_that_match(c):
+    matching_cats = []
+    for cat in uni.cats:
+        s = getattr(uni, cat)
+        if s == '':  # Probably Cs on Jython
+            continue
+        if re.compile('[%s]' % s).match(c):
+            matching_cats.append(cat)
+    return matching_cats
+
+
+def test_spot_check_types():
+    # Each char should match one, and precisely one, category
+    random.seed(0)
+    for i in range(1000):
+        o = random.randint(0, 65535)
+        c = unichr(o)
+        if o > 0xd800 and o <= 0xdfff and not uni.Cs:
+            continue  # Bah, Jython.
+        print(hex(o))
+        cats = _cats_that_match(c)
+        assert len(cats) == 1, "%d (%s): %s" % (o, c, cats)
diff --git a/tests/test_using_api.py b/tests/test_using_api.py
index 35546508..616bff6f 100644
--- a/tests/test_using_api.py
+++ b/tests/test_using_api.py
@@ -7,7 +7,7 @@
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+from pytest import raises
 
 from pygments.lexer import using, bygroups, this, RegexLexer
 from pygments.token import String, Text, Keyword
@@ -28,14 +28,13 @@ class MyLexer(RegexLexer):
     }
 
 
-class UsingStateTest(unittest.TestCase):
-    def test_basic(self):
-        expected = [(Text, 'a'), (String, '"'), (Keyword, 'bcd'),
-                    (String, '"'), (Text, 'e\n')]
-        t = list(MyLexer().get_tokens('a"bcd"e'))
-        self.assertEqual(t, expected)
+def test_basic():
+    expected = [(Text, 'a'), (String, '"'), (Keyword, 'bcd'),
+                (String, '"'), (Text, 'e\n')]
+    assert list(MyLexer().get_tokens('a"bcd"e')) == expected
 
-    def test_error(self):
-        def gen():
-            return list(MyLexer().get_tokens('#a'))
-        self.assertRaises(KeyError, gen)
+
+def test_error():
+    def gen():
+        return list(MyLexer().get_tokens('#a'))
+    assert raises(KeyError, gen)
diff --git a/tests/test_util.py b/tests/test_util.py
index cdb58b3f..aa7b7acb 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -3,12 +3,13 @@
     Test suite for the util module
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
 import re
-import unittest
+
+from pytest import raises
 
 from pygments import util, console
 
@@ -19,195 +20,201 @@ class FakeLexer(object):
     analyse = util.make_analysator(analyse)
 
 
-class UtilTest(unittest.TestCase):
-
-    def test_getoptions(self):
-        raises = self.assertRaises
-        equals = self.assertEqual
-
-        equals(util.get_bool_opt({}, 'a', True), True)
-        equals(util.get_bool_opt({}, 'a', 1), True)
-        equals(util.get_bool_opt({}, 'a', 'true'), True)
-        equals(util.get_bool_opt({}, 'a', 'no'), False)
-        raises(util.OptionError, util.get_bool_opt, {}, 'a', [])
-        raises(util.OptionError, util.get_bool_opt, {}, 'a', 'foo')
-
-        equals(util.get_int_opt({}, 'a', 1), 1)
-        raises(util.OptionError, util.get_int_opt, {}, 'a', [])
-        raises(util.OptionError, util.get_int_opt, {}, 'a', 'bar')
-
-        equals(util.get_list_opt({}, 'a', [1]), [1])
-        equals(util.get_list_opt({}, 'a', '1 2'), ['1', '2'])
-        raises(util.OptionError, util.get_list_opt, {}, 'a', 1)
-
-        equals(util.get_choice_opt({}, 'a', ['foo', 'bar'], 'bar'), 'bar')
-        equals(util.get_choice_opt({}, 'a', ['foo', 'bar'], 'Bar', True), 'bar')
-        raises(util.OptionError, util.get_choice_opt, {}, 'a',
-               ['foo', 'bar'], 'baz')
-
-    def test_docstring_headline(self):
-        def f1():
-            """
-            docstring headline
-
-            other text
-            """
-        def f2():
-            """
-            docstring
-            headline
-
-            other text
-            """
-        def f3():
-            pass
-
-        self.assertEqual(util.docstring_headline(f1), 'docstring headline')
-        self.assertEqual(util.docstring_headline(f2), 'docstring headline')
-        self.assertEqual(util.docstring_headline(f3), '')
-
-    def test_analysator_returns_float(self):
-        # If an analysator wrapped by make_analysator returns a floating point
-        # number, then that number will be returned by the wrapper.
-        self.assertEqual(FakeLexer.analyse('0.5'), 0.5)
-
-    def test_analysator_returns_boolean(self):
-        # If an analysator wrapped by make_analysator returns a boolean value,
-        # then the wrapper will return 1.0 if the boolean was True or 0.0 if
-        # it was False.
-        self.assertEqual(FakeLexer.analyse(True), 1.0)
-        self.assertEqual(FakeLexer.analyse(False), 0.0)
-
-    def test_analysator_raises_exception(self):
-        # If an analysator wrapped by make_analysator raises an exception,
-        # then the wrapper will return 0.0.
-        class ErrorLexer(object):
-            def analyse(text):
-                raise RuntimeError('something bad happened')
-            analyse = util.make_analysator(analyse)
-        self.assertEqual(ErrorLexer.analyse(''), 0.0)
-
-    def test_analysator_value_error(self):
-        # When converting the analysator's return value to a float a
-        # ValueError may occur.  If that happens 0.0 is returned instead.
-        self.assertEqual(FakeLexer.analyse('bad input'), 0.0)
-
-    def test_analysator_type_error(self):
-        # When converting the analysator's return value to a float a
-        # TypeError may occur.  If that happens 0.0 is returned instead.
-        self.assertEqual(FakeLexer.analyse('xxx'), 0.0)
-
-    def test_shebang_matches(self):
-        self.assertTrue(util.shebang_matches('#!/usr/bin/env python\n', r'python(2\.\d)?'))
-        self.assertTrue(util.shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?'))
-        self.assertTrue(util.shebang_matches('#!/usr/bin/startsomethingwith python',
-                                             r'python(2\.\d)?'))
-        self.assertTrue(util.shebang_matches('#!C:\\Python2.4\\Python.exe',
-                                             r'python(2\.\d)?'))
-
-        self.assertFalse(util.shebang_matches('#!/usr/bin/python-ruby',
-                                              r'python(2\.\d)?'))
-        self.assertFalse(util.shebang_matches('#!/usr/bin/python/ruby',
-                                              r'python(2\.\d)?'))
-        self.assertFalse(util.shebang_matches('#!', r'python'))
-
-    def test_doctype_matches(self):
-        self.assertTrue(util.doctype_matches(
-            '<!DOCTYPE html> <html>', 'html.*'))
-        self.assertFalse(util.doctype_matches(
-            '<?xml ?> <DOCTYPE html PUBLIC "a"> <html>', 'html.*'))
-        self.assertTrue(util.html_doctype_matches(
-            '<?xml ?><!DOCTYPE html>'))
-
-    def test_xml(self):
-        self.assertTrue(util.looks_like_xml(
-            '<?xml ?><!DOCTYPE html><html>'))
-        self.assertTrue(util.looks_like_xml('<html>abc</html>'))
-        self.assertFalse(util.looks_like_xml('<html>'))
-
-    def test_unirange(self):
-        first_non_bmp = u'\U00010000'
-        r = re.compile(util.unirange(0x10000, 0x20000))
-        m = r.match(first_non_bmp)
-        self.assertTrue(m)
-        self.assertEqual(m.end(), len(first_non_bmp))
-        self.assertFalse(r.match(u'\uffff'))
-        self.assertFalse(r.match(u'xxx'))
-        # Tests that end is inclusive
-        r = re.compile(util.unirange(0x10000, 0x10000) + '+')
-        # Tests that the plus works for the entire unicode point, if narrow
-        # build
-        m = r.match(first_non_bmp * 2)
-        self.assertTrue(m)
-        self.assertEqual(m.end(), len(first_non_bmp) * 2)
-
-    def test_format_lines(self):
-        lst = ['cat', 'dog']
-        output = util.format_lines('var', lst)
-        d = {}
-        exec(output, d)
-        self.assertTrue(isinstance(d['var'], tuple))
-        self.assertEqual(('cat', 'dog'), d['var'])
-
-    def test_duplicates_removed_seq_types(self):
-        # tuple
-        x = util.duplicates_removed(('a', 'a', 'b'))
-        self.assertEqual(['a', 'b'], x)
-        # list
-        x = util.duplicates_removed(['a', 'a', 'b'])
-        self.assertEqual(['a', 'b'], x)
-        # iterator
-        x = util.duplicates_removed(iter(('a', 'a', 'b')))
-        self.assertEqual(['a', 'b'], x)
-
-    def test_duplicates_removed_nonconsecutive(self):
-        # keeps first
-        x = util.duplicates_removed(('a', 'b', 'a'))
-        self.assertEqual(['a', 'b'], x)
-
-    def test_guess_decode(self):
-        # UTF-8 should be decoded as UTF-8
-        s = util.guess_decode(u'\xff'.encode('utf-8'))
-        self.assertEqual(s, (u'\xff', 'utf-8'))
-
-        # otherwise, it could be latin1 or the locale encoding...
-        import locale
-        s = util.guess_decode(b'\xff')
-        self.assertTrue(s[1] in ('latin1', locale.getpreferredencoding()))
-
-    def test_guess_decode_from_terminal(self):
-        class Term:
-            encoding = 'utf-7'
-
-        s = util.guess_decode_from_terminal(u'\xff'.encode('utf-7'), Term)
-        self.assertEqual(s, (u'\xff', 'utf-7'))
-
-        s = util.guess_decode_from_terminal(u'\xff'.encode('utf-8'), Term)
-        self.assertEqual(s, (u'\xff', 'utf-8'))
-
-    def test_add_metaclass(self):
-        class Meta(type):
-            pass
-
-        @util.add_metaclass(Meta)
-        class Cls:
-            pass
-
-        self.assertEqual(type(Cls), Meta)
-
-
-class ConsoleTest(unittest.TestCase):
-
-    def test_ansiformat(self):
-        f = console.ansiformat
-        c = console.codes
-        all_attrs = f('+*_blue_*+', 'text')
-        self.assertTrue(c['blue'] in all_attrs and c['blink'] in all_attrs
-                        and c['bold'] in all_attrs and c['underline'] in all_attrs
-                        and c['reset'] in all_attrs)
-        self.assertRaises(KeyError, f, '*mauve*', 'text')
-
-    def test_functions(self):
-        self.assertEqual(console.reset_color(), console.codes['reset'])
-        self.assertEqual(console.colorize('blue', 'text'),
-                         console.codes['blue'] + 'text' + console.codes['reset'])
+def test_getoptions():
+    assert util.get_bool_opt({}, 'a', True) is True
+    assert util.get_bool_opt({}, 'a', 1) is True
+    assert util.get_bool_opt({}, 'a', 'true') is True
+    assert util.get_bool_opt({}, 'a', 'no') is False
+    assert raises(util.OptionError, util.get_bool_opt, {}, 'a', [])
+    assert raises(util.OptionError, util.get_bool_opt, {}, 'a', 'foo')
+
+    assert util.get_int_opt({}, 'a', 1) == 1
+    assert raises(util.OptionError, util.get_int_opt, {}, 'a', [])
+    assert raises(util.OptionError, util.get_int_opt, {}, 'a', 'bar')
+
+    assert util.get_list_opt({}, 'a', [1]) == [1]
+    assert util.get_list_opt({}, 'a', '1 2') == ['1', '2']
+    assert raises(util.OptionError, util.get_list_opt, {}, 'a', 1)
+
+    assert util.get_choice_opt({}, 'a', ['foo', 'bar'], 'bar') == 'bar'
+    assert util.get_choice_opt({}, 'a', ['foo', 'bar'], 'Bar', True) == 'bar'
+    assert raises(util.OptionError, util.get_choice_opt, {}, 'a',
+                  ['foo', 'bar'], 'baz')
+
+
+def test_docstring_headline():
+    def f1():
+        """
+        docstring headline
+
+        other text
+        """
+    def f2():
+        """
+        docstring
+        headline
+
+        other text
+        """
+    def f3():
+        pass
+
+    assert util.docstring_headline(f1) == 'docstring headline'
+    assert util.docstring_headline(f2) == 'docstring headline'
+    assert util.docstring_headline(f3) == ''
+
+
+def test_analysator_returns_float():
+    # If an analysator wrapped by make_analysator returns a floating point
+    # number, then that number will be returned by the wrapper.
+    assert FakeLexer.analyse('0.5') == 0.5
+
+
+def test_analysator_returns_boolean():
+    # If an analysator wrapped by make_analysator returns a boolean value,
+    # then the wrapper will return 1.0 if the boolean was True or 0.0 if
+    # it was False.
+    assert FakeLexer.analyse(True) == 1.0
+    assert FakeLexer.analyse(False) == 0.0
+
+
+def test_analysator_raises_exception():
+    # If an analysator wrapped by make_analysator raises an exception,
+    # then the wrapper will return 0.0.
+    class ErrorLexer(object):
+        def analyse(text):
+            raise RuntimeError('something bad happened')
+        analyse = util.make_analysator(analyse)
+    assert ErrorLexer.analyse('') == 0.0
+
+
+def test_analysator_value_error():
+    # When converting the analysator's return value to a float a
+    # ValueError may occur.  If that happens 0.0 is returned instead.
+    assert FakeLexer.analyse('bad input') == 0.0
+
+
+def test_analysator_type_error():
+    # When converting the analysator's return value to a float a
+    # TypeError may occur.  If that happens 0.0 is returned instead.
+    assert FakeLexer.analyse('xxx') == 0.0
+
+
+def test_shebang_matches():
+    assert util.shebang_matches('#!/usr/bin/env python\n', r'python(2\.\d)?')
+    assert util.shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
+    assert util.shebang_matches('#!/usr/bin/startsomethingwith python',
+                                r'python(2\.\d)?')
+    assert util.shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
+
+    assert not util.shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
+    assert not util.shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
+    assert not util.shebang_matches('#!', r'python')
+
+
+def test_doctype_matches():
+    assert util.doctype_matches('<!DOCTYPE html> <html>', 'html.*')
+    assert not util.doctype_matches(
+        '<?xml ?> <DOCTYPE html PUBLIC "a"> <html>', 'html.*')
+    assert util.html_doctype_matches(
+        '<?xml ?><!DOCTYPE html>')
+
+
+def test_xml():
+    assert util.looks_like_xml(
+        '<?xml ?><!DOCTYPE html><html>')
+    assert util.looks_like_xml('<html>abc</html>')
+    assert not util.looks_like_xml('<html>')
+
+
+def test_unirange():
+    first_non_bmp = u'\U00010000'
+    r = re.compile(util.unirange(0x10000, 0x20000))
+    m = r.match(first_non_bmp)
+    assert m
+    assert m.end() == len(first_non_bmp)
+    assert not r.match(u'\uffff')
+    assert not r.match(u'xxx')
+    # Tests that end is inclusive
+    r = re.compile(util.unirange(0x10000, 0x10000) + '+')
+    # Tests that the plus works for the entire unicode point, if narrow
+    # build
+    m = r.match(first_non_bmp * 2)
+    assert m
+    assert m.end() == len(first_non_bmp) * 2
+
+
+def test_format_lines():
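+    # format_lines() renders a sequence as Python source assigning a tuple to
+    # the given variable name; exec the output and check the resulting value.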
+    lst = ['cat', 'dog']
+    output = util.format_lines('var', lst)
+    d = {}
+    exec(output, d)
+    assert isinstance(d['var'], tuple)
+    assert ('cat', 'dog') == d['var']
+
+
+def test_duplicates_removed_seq_types():
+    # tuple
+    x = util.duplicates_removed(('a', 'a', 'b'))
+    assert ['a', 'b'] == x
+    # list
+    x = util.duplicates_removed(['a', 'a', 'b'])
+    assert ['a', 'b'] == x
+    # iterator
+    x = util.duplicates_removed(iter(('a', 'a', 'b')))
+    assert ['a', 'b'] == x
+
+
+def test_duplicates_removed_nonconsecutive():
+    # keeps first
+    x = util.duplicates_removed(('a', 'b', 'a'))
+    assert ['a', 'b'] == x
+
+
+def test_guess_decode():
+    # UTF-8 should be decoded as UTF-8
+    s = util.guess_decode(u'\xff'.encode('utf-8'))
+    assert s == (u'\xff', 'utf-8')
+
+    # otherwise, it could be latin1 or the locale encoding...
+    import locale
+    s = util.guess_decode(b'\xff')
+    assert s[1] in ('latin1', locale.getpreferredencoding())
+
+
+def test_guess_decode_from_terminal():
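+    # The terminal's declared encoding is tried first; if the bytes do not
+    # decode with it, the function falls back to the regular guessing logic.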
+    class Term:
+        encoding = 'utf-7'
+
+    s = util.guess_decode_from_terminal(u'\xff'.encode('utf-7'), Term)
+    assert s == (u'\xff', 'utf-7')
+
+    s = util.guess_decode_from_terminal(u'\xff'.encode('utf-8'), Term)
+    assert s == (u'\xff', 'utf-8')
+
+
+def test_add_metaclass():
+    class Meta(type):
+        pass
+
+    @util.add_metaclass(Meta)
+    class Cls:
+        pass
+
+    assert type(Cls) is Meta
+
+
+def test_console_ansiformat():
+    f = console.ansiformat
+    c = console.codes
+    all_attrs = f('+*_blue_*+', 'text')
+    assert c['blue'] in all_attrs and c['blink'] in all_attrs
+    assert c['bold'] in all_attrs and c['underline'] in all_attrs
+    assert c['reset'] in all_attrs
+    assert raises(KeyError, f, '*mauve*', 'text')
+
+
+def test_console_functions():
+    assert console.reset_color() == console.codes['reset']
+    assert console.colorize('blue', 'text') == \
+        console.codes['blue'] + 'text' + console.codes['reset']
diff --git a/tests/test_whiley.py b/tests/test_whiley.py
index f447ffec..84fef25b 100644
--- a/tests/test_whiley.py
+++ b/tests/test_whiley.py
@@ -3,28 +3,29 @@
     Whiley Test
     ~~~~~~~~~~~
 
-    :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """
 
-import unittest
+import pytest
 
 from pygments.lexers import WhileyLexer
 from pygments.token import Token
 
 
-class WhileyTest(unittest.TestCase):
-    def setUp(self):
-        self.lexer = WhileyLexer()
+@pytest.fixture(scope='module')
+def lexer():
+    yield WhileyLexer()
 
-    def testWhileyOperator(self):
-        fragment = u'123 \u2200 x\n'
-        tokens = [
-            (Token.Literal.Number.Integer, u'123'),
-            (Token.Text, u' '),
-            (Token.Operator, u'\u2200'),
-            (Token.Text, u' '),
-            (Token.Name, u'x'),
-            (Token.Text, u'\n'),
-        ]
-        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+def test_whiley_operator(lexer):
+    fragment = u'123 \u2200 x\n'
+    tokens = [
+        (Token.Literal.Number.Integer, u'123'),
+        (Token.Text, u' '),
+        (Token.Operator, u'\u2200'),
+        (Token.Text, u' '),
+        (Token.Name, u'x'),
+        (Token.Text, u'\n'),
+    ]
+    assert list(lexer.get_tokens(fragment)) == tokens
diff --git a/tox.ini b/tox.ini
index 2c63c292..98aedc1a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,8 @@
 [tox]
-envlist = py27, py35, py36, py37
+envlist = py27, py36, py37
+
 [testenv]
 deps =
-    nose
-    coverage
-commands = python -d tests/run.py {posargs}
+    pytest
+    pytest-cov
+commands = py.test {posargs}
-- 
cgit v1.2.1