author     Georg Brandl <georg@python.org>    2019-05-06 08:17:53 +0200
committer  Georg Brandl <georg@python.org>    2019-11-10 10:15:13 +0100
commit     a41f232a0aa9f2b435e54a7e247c858a8f2d7fa8 (patch)
tree       8245310d57e48e3e966a46f40e120d6ea3ce28f7
parent     7b418bd18e01b06ccaa1c7bd03d11abf303f2fbd (diff)
download   pygments-git-a41f232a0aa9f2b435e54a7e247c858a8f2d7fa8.tar.gz
Initial port to py.test
Unittest classes are kept for now, since py.test is OK with them. Generator tests had to be switched to pytest parametrized tests.
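
The conversion pattern applied throughout this commit, sketched with hypothetical names (not taken from the diff below): nose runs "generator tests" that yield a callable together with its arguments, while pytest wants the cases declared up front via @pytest.mark.parametrize.

    # Before: a nose generator test; the runner invokes verify(case) per yield.
    def test_cases_nose():
        def verify(case):
            assert case.isupper()
        for case in ['A', 'B', 'C']:
            yield verify, case

    # After: the same cases as pytest parametrized tests, one test item each.
    import pytest

    @pytest.mark.parametrize('case', ['A', 'B', 'C'])
    def test_cases_pytest(case):
        assert case.isupper()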
-rw-r--r--  .gitignore                    |   1
-rw-r--r--  Makefile                      |   8
-rw-r--r--  pytest.ini                    |   4
-rw-r--r--  requirements.txt              |   4
-rw-r--r--  tests/__init__.py             |   0
-rw-r--r--  tests/run.py                  |  58
-rw-r--r--  tests/string_asserts.py       |  22
-rw-r--r--  tests/support.py              |  17
-rw-r--r--  tests/test_basic_api.py       | 252
-rw-r--r--  tests/test_cmdline.py         |   9
-rw-r--r--  tests/test_examplefiles.py    |  48
-rw-r--r--  tests/test_html_formatter.py  |  18
-rw-r--r--  tests/test_irc_formatter.py   |   5
-rw-r--r--  tests/test_javascript.py      |  10
-rw-r--r--  tests/test_latex_formatter.py |  10
-rw-r--r--  tests/test_modeline.py        |   7
-rw-r--r--  tests/test_regexlexer.py      |  12
-rw-r--r--  tests/test_rtf_formatter.py   |  18
-rw-r--r--  tests/test_string_asserts.py  |  35
-rw-r--r--  tests/test_using_api.py       |   7
20 files changed, 206 insertions, 339 deletions
diff --git a/.gitignore b/.gitignore
index 13fef3d9..66c733ff 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,6 +2,7 @@
*.pyc
*.pyo
.*.sw[op]
+/.pytest_cache/
/.idea/
/.project
/.tags
diff --git a/Makefile b/Makefile
index 2fcb832f..2053871a 100644
--- a/Makefile
+++ b/Makefile
@@ -49,14 +49,16 @@ pylint:
reindent:
@$(PYTHON) scripts/reindent.py -r -B .
+TEST = test
+
test:
- @$(PYTHON) tests/run.py -d $(TEST)
+ @$(PYTHON) `which py.test` $(TEST)
test-coverage:
- @$(PYTHON) tests/run.py -d --with-coverage --cover-package=pygments --cover-erase $(TEST)
+ @$(PYTHON) `which py.test` --cov --cov-report=html --cov-report=term $(TEST)
test-examplefiles:
- nosetests tests/test_examplefiles.py
+ @$(PYTHON) `which py.test` tests.test_examplefiles
tox-test:
@tox -- $(TEST)
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 00000000..006b89fd
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,4 @@
+[pytest]
+filterwarnings =
+ error::FutureWarning
+ error::DeprecationWarning
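
The new pytest.ini takes over from the warnings.filterwarnings() calls in the deleted tests/run.py (below): both warning categories are promoted to errors, so any test that triggers one fails. A minimal sketch of the effect, with a hypothetical test that is not part of this commit:

    import warnings

    def test_uses_deprecated_api():
        # with error::DeprecationWarning in pytest.ini, this warning is
        # re-raised as an exception and the test is reported as a failure
        warnings.warn("old API", DeprecationWarning)

Note that the ini filters apply to the whole test run, whereas run.py scoped them to pygments modules via the module= argument.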
diff --git a/requirements.txt b/requirements.txt
index 4754a9d2..5d6829a8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
-coverage
-nose
+pytest-cov
+pytest
pyflakes
pylint
tox
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/__init__.py
diff --git a/tests/run.py b/tests/run.py
deleted file mode 100644
index edebc7a1..00000000
--- a/tests/run.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- Pygments unit tests
- ~~~~~~~~~~~~~~~~~~
-
- Usage::
-
- python run.py [testfile ...]
-
-
- :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from __future__ import print_function
-
-import os
-import sys
-import warnings
-
-# only find tests in this directory
-if os.path.dirname(__file__):
- os.chdir(os.path.dirname(__file__))
-
-# make FutureWarnings (coming from Regex syntax most likely) and
-# DeprecationWarnings due to non-raw strings an error
-warnings.filterwarnings("error", module=r"pygments\..*",
- category=FutureWarning)
-warnings.filterwarnings("error", module=r".*pygments.*",
- category=DeprecationWarning)
-
-
-try:
- import nose
-except ImportError:
- print('nose is required to run the Pygments test suite')
- sys.exit(1)
-
-# make sure the current source is first on sys.path
-sys.path.insert(0, '..')
-
-if '--with-coverage' not in sys.argv:
- # if running with coverage, pygments should not be imported before coverage
- # is started, otherwise it will count already executed lines as uncovered
- try:
- import pygments
- except ImportError as err:
- print('Cannot find Pygments to test: %s' % err)
- sys.exit(1)
- else:
- print('Pygments %s test suite running (Python %s)...' %
- (pygments.__version__, sys.version.split()[0]),
- file=sys.stderr)
-else:
- print('Pygments test suite running (Python %s)...' % sys.version.split()[0],
- file=sys.stderr)
-
-nose.main()
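
With the custom runner gone, the suite is invoked directly through py.test (see the Makefile change above). If a script entry point were still wanted, a minimal equivalent, not part of this commit, would be:

    # hypothetical replacement for tests/run.py: delegate everything to pytest
    import sys

    import pytest

    if __name__ == '__main__':
        sys.exit(pytest.main(sys.argv[1:]))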
diff --git a/tests/string_asserts.py b/tests/string_asserts.py
deleted file mode 100644
index a02c52bb..00000000
--- a/tests/string_asserts.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- Pygments string assert utility
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-class StringTests(object):
-
- def assertStartsWith(self, haystack, needle, msg=None):
- if msg is None:
- msg = "'{0}' does not start with '{1}'".format(haystack, needle)
- if not haystack.startswith(needle):
- raise(AssertionError(msg))
-
- def assertEndsWith(self, haystack, needle, msg=None):
- if msg is None:
- msg = "'{0}' does not end with '{1}'".format(haystack, needle)
- if not haystack.endswith(needle):
- raise(AssertionError(msg))
diff --git a/tests/support.py b/tests/support.py
deleted file mode 100644
index c66ac663..00000000
--- a/tests/support.py
+++ /dev/null
@@ -1,17 +0,0 @@
-# coding: utf-8
-"""
-Support for Pygments tests
-"""
-
-import os
-
-from nose import SkipTest
-
-
-def location(mod_name):
- """
- Return the file and directory that the code for *mod_name* is in.
- """
- source = mod_name.endswith("pyc") and mod_name[:-1] or mod_name
- source = os.path.abspath(source)
- return source, os.path.dirname(source)
diff --git a/tests/test_basic_api.py b/tests/test_basic_api.py
index b1b69267..07b2f09c 100644
--- a/tests/test_basic_api.py
+++ b/tests/test_basic_api.py
@@ -11,6 +11,9 @@ from __future__ import print_function
import random
import unittest
+from os import path
+
+import pytest
from pygments import lexers, formatters, lex, format
from pygments.token import _TokenType, Text
@@ -18,100 +21,92 @@ from pygments.lexer import RegexLexer
from pygments.formatters.img import FontNotFound
from pygments.util import text_type, StringIO, BytesIO, xrange, ClassNotFound
-import support
-
-TESTFILE, TESTDIR = support.location(__file__)
+TESTDIR = path.dirname(path.abspath(__file__))
+TESTFILE = path.join(TESTDIR, 'test_basic_api.py')
test_content = [chr(i) for i in xrange(33, 128)] * 5
random.shuffle(test_content)
test_content = ''.join(test_content) + '\n'
-def test_lexer_instantiate_all():
+@pytest.mark.parametrize('name', lexers.LEXERS)
+def test_lexer_instantiate_all(name):
# instantiate every lexer, to see if the token type defs are correct
- def verify(name):
- getattr(lexers, name)
- for x in lexers.LEXERS:
- yield verify, x
+ getattr(lexers, name)
-def test_lexer_classes():
+@pytest.mark.parametrize('cls', lexers._iter_lexerclasses(plugins=False))
+def test_lexer_classes(cls):
# test that every lexer class has the correct public API
- def verify(cls):
- assert type(cls.name) is str
- for attr in 'aliases', 'filenames', 'alias_filenames', 'mimetypes':
- assert hasattr(cls, attr)
- assert type(getattr(cls, attr)) is list, \
- "%s: %s attribute wrong" % (cls, attr)
- result = cls.analyse_text("abc")
- assert isinstance(result, float) and 0.0 <= result <= 1.0
- result = cls.analyse_text(".abc")
- assert isinstance(result, float) and 0.0 <= result <= 1.0
-
- assert all(al.lower() == al for al in cls.aliases)
-
- inst = cls(opt1="val1", opt2="val2")
- if issubclass(cls, RegexLexer):
- if not hasattr(cls, '_tokens'):
- # if there's no "_tokens", the lexer has to be one with
- # multiple tokendef variants
- assert cls.token_variants
- for variant in cls.tokens:
- assert 'root' in cls.tokens[variant]
- else:
- assert 'root' in cls._tokens, \
- '%s has no root state' % cls
-
- if cls.name in ['XQuery', 'Opa']: # XXX temporary
- return
-
- try:
- tokens = list(inst.get_tokens(test_content))
- except KeyboardInterrupt:
- raise KeyboardInterrupt(
- 'interrupted %s.get_tokens(): test_content=%r' %
- (cls.__name__, test_content))
- txt = ""
- for token in tokens:
- assert isinstance(token, tuple)
- assert isinstance(token[0], _TokenType)
- assert isinstance(token[1], text_type)
- txt += token[1]
- assert txt == test_content, "%s lexer roundtrip failed: %r != %r" % \
- (cls.name, test_content, txt)
-
- for lexer in lexers._iter_lexerclasses(plugins=False):
- yield verify, lexer
-
-
-def test_lexer_options():
+ assert type(cls.name) is str
+ for attr in 'aliases', 'filenames', 'alias_filenames', 'mimetypes':
+ assert hasattr(cls, attr)
+ assert type(getattr(cls, attr)) is list, \
+ "%s: %s attribute wrong" % (cls, attr)
+ result = cls.analyse_text("abc")
+ assert isinstance(result, float) and 0.0 <= result <= 1.0
+ result = cls.analyse_text(".abc")
+ assert isinstance(result, float) and 0.0 <= result <= 1.0
+
+ assert all(al.lower() == al for al in cls.aliases)
+
+ inst = cls(opt1="val1", opt2="val2")
+ if issubclass(cls, RegexLexer):
+ if not hasattr(cls, '_tokens'):
+ # if there's no "_tokens", the lexer has to be one with
+ # multiple tokendef variants
+ assert cls.token_variants
+ for variant in cls.tokens:
+ assert 'root' in cls.tokens[variant]
+ else:
+ assert 'root' in cls._tokens, \
+ '%s has no root state' % cls
+
+ if cls.name in ['XQuery', 'Opa']: # XXX temporary
+ return
+
+ try:
+ tokens = list(inst.get_tokens(test_content))
+ except KeyboardInterrupt:
+ raise KeyboardInterrupt(
+ 'interrupted %s.get_tokens(): test_content=%r' %
+ (cls.__name__, test_content))
+ txt = ""
+ for token in tokens:
+ assert isinstance(token, tuple)
+ assert isinstance(token[0], _TokenType)
+ assert isinstance(token[1], text_type)
+ txt += token[1]
+ assert txt == test_content, "%s lexer roundtrip failed: %r != %r" % \
+ (cls.name, test_content, txt)
+
+
+@pytest.mark.parametrize('cls', lexers._iter_lexerclasses(plugins=False))
+def test_lexer_options(cls):
+ if cls.__name__ == 'RawTokenLexer':
+ # this one is special
+ return
+
# test that the basic options work
def ensure(tokens, output):
concatenated = ''.join(token[1] for token in tokens)
assert concatenated == output, \
'%s: %r != %r' % (lexer, concatenated, output)
- def verify(cls):
- inst = cls(stripnl=False)
- ensure(inst.get_tokens('a\nb'), 'a\nb\n')
- ensure(inst.get_tokens('\n\n\n'), '\n\n\n')
- inst = cls(stripall=True)
- ensure(inst.get_tokens(' \n b\n\n\n'), 'b\n')
- # some lexers require full lines in input
- if ('ConsoleLexer' not in cls.__name__ and
- 'SessionLexer' not in cls.__name__ and
- not cls.__name__.startswith('Literate') and
- cls.__name__ not in ('ErlangShellLexer', 'RobotFrameworkLexer')):
- inst = cls(ensurenl=False)
- ensure(inst.get_tokens('a\nb'), 'a\nb')
- inst = cls(ensurenl=False, stripall=True)
- ensure(inst.get_tokens('a\nb\n\n'), 'a\nb')
-
- for lexer in lexers._iter_lexerclasses(plugins=False):
- if lexer.__name__ == 'RawTokenLexer':
- # this one is special
- continue
- yield verify, lexer
+ inst = cls(stripnl=False)
+ ensure(inst.get_tokens('a\nb'), 'a\nb\n')
+ ensure(inst.get_tokens('\n\n\n'), '\n\n\n')
+ inst = cls(stripall=True)
+ ensure(inst.get_tokens(' \n b\n\n\n'), 'b\n')
+ # some lexers require full lines in input
+ if ('ConsoleLexer' not in cls.__name__ and
+ 'SessionLexer' not in cls.__name__ and
+ not cls.__name__.startswith('Literate') and
+ cls.__name__ not in ('ErlangShellLexer', 'RobotFrameworkLexer')):
+ inst = cls(ensurenl=False)
+ ensure(inst.get_tokens('a\nb'), 'a\nb')
+ inst = cls(ensurenl=False, stripall=True)
+ ensure(inst.get_tokens('a\nb\n\n'), 'a\nb')
def test_get_lexers():
@@ -127,7 +122,7 @@ def test_get_lexers():
(lexers.guess_lexer, ("#!/usr/bin/python -O\nprint",)),
(lexers.guess_lexer_for_filename, ("a.py", "<%= @foo %>"))
]:
- yield verify, func, args
+ verify(func, args)
for cls, (_, lname, aliases, _, mimetypes) in lexers.LEXERS.items():
assert cls == lexers.find_lexer_class(lname).__name__
@@ -146,38 +141,35 @@ def test_get_lexers():
raise Exception
-def test_formatter_public_api():
+@pytest.mark.parametrize('cls', [getattr(formatters, name)
+ for name in formatters.FORMATTERS])
+def test_formatter_public_api(cls):
# test that every formatter class has the correct public API
ts = list(lexers.PythonLexer().get_tokens("def f(): pass"))
string_out = StringIO()
bytes_out = BytesIO()
- def verify(formatter):
- info = formatters.FORMATTERS[formatter.__name__]
- assert len(info) == 5
- assert info[1], "missing formatter name"
- assert info[2], "missing formatter aliases"
- assert info[4], "missing formatter docstring"
-
- try:
- inst = formatter(opt1="val1")
- except (ImportError, FontNotFound) as e:
- raise support.SkipTest(e)
-
- try:
- inst.get_style_defs()
- except NotImplementedError:
- # may be raised by formatters for which it doesn't make sense
- pass
-
- if formatter.unicodeoutput:
- inst.format(ts, string_out)
- else:
- inst.format(ts, bytes_out)
+ info = formatters.FORMATTERS[cls.__name__]
+ assert len(info) == 5
+ assert info[1], "missing formatter name"
+ assert info[2], "missing formatter aliases"
+ assert info[4], "missing formatter docstring"
- for name in formatters.FORMATTERS:
- formatter = getattr(formatters, name)
- yield verify, formatter
+ try:
+ inst = cls(opt1="val1")
+ except (ImportError, FontNotFound) as e:
+ pytest.skip(str(e))
+
+ try:
+ inst.get_style_defs()
+ except NotImplementedError:
+ # may be raised by formatters for which it doesn't make sense
+ pass
+
+ if cls.unicodeoutput:
+ inst.format(ts, string_out)
+ else:
+ inst.format(ts, bytes_out)
def test_formatter_encodings():
@@ -201,37 +193,33 @@ def test_formatter_encodings():
assert u"ä".encode("utf8") in format(tokens, fmt)
-def test_formatter_unicode_handling():
+@pytest.mark.parametrize('cls', [getattr(formatters, name)
+ for name in formatters.FORMATTERS])
+def test_formatter_unicode_handling(cls):
# test that the formatter supports encoding and Unicode
tokens = list(lexers.PythonLexer(encoding='utf-8').
get_tokens("def f(): 'ä'"))
- def verify(formatter):
- try:
- inst = formatter(encoding=None)
- except (ImportError, FontNotFound) as e:
- # some dependency or font not installed
- raise support.SkipTest(e)
-
- if formatter.name != 'Raw tokens':
- out = format(tokens, inst)
- if formatter.unicodeoutput:
- assert type(out) is text_type, '%s: %r' % (formatter, out)
-
- inst = formatter(encoding='utf-8')
- out = format(tokens, inst)
- assert type(out) is bytes, '%s: %r' % (formatter, out)
- # Cannot test for encoding, since formatters may have to escape
- # non-ASCII characters.
- else:
- inst = formatter()
- out = format(tokens, inst)
- assert type(out) is bytes, '%s: %r' % (formatter, out)
-
- for formatter, info in formatters.FORMATTERS.items():
- # this tests the automatic importing as well
- fmter = getattr(formatters, formatter)
- yield verify, fmter
+ try:
+ inst = cls(encoding=None)
+ except (ImportError, FontNotFound) as e:
+ # some dependency or font not installed
+ pytest.skip(str(e))
+
+ if cls.name != 'Raw tokens':
+ out = format(tokens, inst)
+ if cls.unicodeoutput:
+ assert type(out) is text_type, '%s: %r' % (cls, out)
+
+ inst = cls(encoding='utf-8')
+ out = format(tokens, inst)
+ assert type(out) is bytes, '%s: %r' % (cls, out)
+ # Cannot test for encoding, since formatters may have to escape
+ # non-ASCII characters.
+ else:
+ inst = cls()
+ out = format(tokens, inst)
+ assert type(out) is bytes, '%s: %r' % (cls, out)
def test_get_formatters():
diff --git a/tests/test_cmdline.py b/tests/test_cmdline.py
index 169d690d..c76c1089 100644
--- a/tests/test_cmdline.py
+++ b/tests/test_cmdline.py
@@ -15,13 +15,14 @@ import re
import sys
import tempfile
import unittest
+from os import path
-import support
from pygments import cmdline, highlight
from pygments.util import BytesIO, StringIO
+TESTDIR = path.dirname(path.abspath(__file__))
+TESTFILE = path.join(TESTDIR, 'test_cmdline.py')
-TESTFILE, TESTDIR = support.location(__file__)
TESTCODE = '''\
def func(args):
pass
@@ -254,7 +255,7 @@ class CmdLineTest(unittest.TestCase):
self.assertTrue('Error: cannot read' in e)
# lexer file is malformed
- e = self.check_failure('-l', 'support/empty.py',
+ e = self.check_failure('-l', path.join(TESTDIR, 'support', 'empty.py'),
'-x', TESTFILE)
self.assertTrue('Error: no valid CustomLexer class found' in e)
@@ -276,7 +277,7 @@ class CmdLineTest(unittest.TestCase):
self.assertTrue('Error: cannot read' in e)
# formatter file is malformed
- e = self.check_failure('-f', 'support/empty.py',
+ e = self.check_failure('-f', path.join(TESTDIR, 'support', 'empty.py'),
'-x', TESTFILE)
self.assertTrue('Error: no valid CustomFormatter class found' in e)
diff --git a/tests/test_examplefiles.py b/tests/test_examplefiles.py
index e208403b..943e80e7 100644
--- a/tests/test_examplefiles.py
+++ b/tests/test_examplefiles.py
@@ -14,12 +14,12 @@ import pprint
import difflib
import pickle
+import pytest
+
from pygments.lexers import get_lexer_for_filename, get_lexer_by_name
from pygments.token import Error
from pygments.util import ClassNotFound
-import support
-
STORE_OUTPUT = False
STATS = {}
@@ -32,9 +32,11 @@ TESTDIR = os.path.dirname(__file__)
BAD_FILES_FOR_JYTHON = ('Object.st', 'all.nit', 'genclass.clj',
'ragel-cpp_rlscan')
-def test_example_files():
- global STATS
- STATS = {}
+
+def get_example_files():
+ # TODO: move this to a fixture
+ # global STATS
+ # STATS = {}
outdir = os.path.join(TESTDIR, 'examplefiles', 'output')
if STORE_OUTPUT and not os.path.isdir(outdir):
os.makedirs(outdir)
@@ -72,24 +74,26 @@ def test_example_files():
'nor is of the form <lexer>_filename '
'for overriding, thus no lexer found.'
% fn)
- yield check_lexer, lx, fn
-
- N = 7
- stats = list(STATS.items())
- stats.sort(key=lambda x: x[1][1])
- print('\nExample files that took longest absolute time:')
- for fn, t in stats[-N:]:
- print('%-30s %6d chars %8.2f ms %7.3f ms/char' % ((fn,) + t))
- print()
- stats.sort(key=lambda x: x[1][2])
- print('\nExample files that took longest relative time:')
- for fn, t in stats[-N:]:
- print('%-30s %6d chars %8.2f ms %7.3f ms/char' % ((fn,) + t))
-
-
-def check_lexer(lx, fn):
+ yield lx, fn
+
+ # N = 7
+ # stats = list(STATS.items())
+ # stats.sort(key=lambda x: x[1][1])
+ # print('\nExample files that took longest absolute time:')
+ # for fn, t in stats[-N:]:
+ # print('%-30s %6d chars %8.2f ms %7.3f ms/char' % ((fn,) + t))
+ # print()
+ # stats.sort(key=lambda x: x[1][2])
+ # print('\nExample files that took longest relative time:')
+ # for fn, t in stats[-N:]:
+ # print('%-30s %6d chars %8.2f ms %7.3f ms/char' % ((fn,) + t))
+
+
+@pytest.mark.parametrize('what', get_example_files())
+def test_examplefile(what):
+ lx, fn = what
if os.name == 'java' and fn in BAD_FILES_FOR_JYTHON:
- raise support.SkipTest('%s is a known bad file on Jython' % fn)
+ pytest.skip('%s is a known bad file on Jython' % fn)
absfn = os.path.join(TESTDIR, 'examplefiles', fn)
with open(absfn, 'rb') as fp:
text = fp.read()
diff --git a/tests/test_html_formatter.py b/tests/test_html_formatter.py
index 37efd6f0..c3b67333 100644
--- a/tests/test_html_formatter.py
+++ b/tests/test_html_formatter.py
@@ -14,16 +14,15 @@ import os
import re
import unittest
import tempfile
-from os.path import join, dirname, isfile
+from os import path
from pygments.util import StringIO
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter, NullFormatter
from pygments.formatters.html import escape_html
-import support
-
-TESTFILE, TESTDIR = support.location(__file__)
+TESTDIR = path.dirname(path.abspath(__file__))
+TESTFILE = path.join(TESTDIR, 'test_html_formatter.py')
with io.open(TESTFILE, encoding='utf-8') as fp:
tokensource = list(PythonLexer().get_tokens(fp.read()))
@@ -48,22 +47,23 @@ class HtmlFormatterTest(unittest.TestCase):
# CSS should be in /tmp directory
fmt1 = HtmlFormatter(full=True, cssfile='fmt1.css', outencoding='utf-8')
# CSS should be in TESTDIR (TESTDIR is absolute)
- fmt2 = HtmlFormatter(full=True, cssfile=join(TESTDIR, 'fmt2.css'),
+ fmt2 = HtmlFormatter(full=True, cssfile=path.join(TESTDIR, 'fmt2.css'),
outencoding='utf-8')
tfile = tempfile.NamedTemporaryFile(suffix='.html')
fmt1.format(tokensource, tfile)
try:
fmt2.format(tokensource, tfile)
- self.assertTrue(isfile(join(TESTDIR, 'fmt2.css')))
+ self.assertTrue(path.isfile(path.join(TESTDIR, 'fmt2.css')))
except IOError:
# test directory not writable
pass
tfile.close()
- self.assertTrue(isfile(join(dirname(tfile.name), 'fmt1.css')))
- os.unlink(join(dirname(tfile.name), 'fmt1.css'))
+ self.assertTrue(path.isfile(path.join(path.dirname(tfile.name),
+ 'fmt1.css')))
+ os.unlink(path.join(path.dirname(tfile.name), 'fmt1.css'))
try:
- os.unlink(join(TESTDIR, 'fmt2.css'))
+ os.unlink(path.join(TESTDIR, 'fmt2.css'))
except OSError:
pass
diff --git a/tests/test_irc_formatter.py b/tests/test_irc_formatter.py
index 18bcd58b..803b8f7d 100644
--- a/tests/test_irc_formatter.py
+++ b/tests/test_irc_formatter.py
@@ -9,17 +9,15 @@
from __future__ import print_function
-import re
import unittest
from pygments.util import StringIO
from pygments.lexers import PythonLexer
from pygments.formatters import IRCFormatter
-import support
-
tokensource = list(PythonLexer().get_tokens("lambda x: 123"))
+
class IRCFormatterTest(unittest.TestCase):
def test_correct_output(self):
hfmt = IRCFormatter()
@@ -27,4 +25,3 @@ class IRCFormatterTest(unittest.TestCase):
hfmt.format(tokensource, houtfile)
self.assertEqual(u'\x0302lambda\x03 x: \x0302123\x03\n', houtfile.getvalue())
-
diff --git a/tests/test_javascript.py b/tests/test_javascript.py
index a2dfb7e1..0e279d93 100644
--- a/tests/test_javascript.py
+++ b/tests/test_javascript.py
@@ -9,6 +9,8 @@
import unittest
+import pytest
+
from pygments.lexers import CoffeeScriptLexer
from pygments.token import Token
@@ -36,11 +38,10 @@ COFFEE_SLASH_GOLDEN = [
('a = 1 + /d/.test(a)', True),
]
-def test_coffee_slashes():
- for input_str, slashes_are_regex_here in COFFEE_SLASH_GOLDEN:
- yield coffee_runner, input_str, slashes_are_regex_here
-def coffee_runner(input_str, slashes_are_regex_here):
+@pytest.mark.parametrize('golden', COFFEE_SLASH_GOLDEN)
+def test_coffee_slashes(golden):
+ input_str, slashes_are_regex_here = golden
lex = CoffeeScriptLexer()
output = list(lex.get_tokens(input_str))
print(output)
@@ -49,6 +50,7 @@ def coffee_runner(input_str, slashes_are_regex_here):
is_regex = t is Token.String.Regex
assert is_regex == slashes_are_regex_here, (t, s)
+
class CoffeeTest(unittest.TestCase):
def setUp(self):
self.lexer = CoffeeScriptLexer()
diff --git a/tests/test_latex_formatter.py b/tests/test_latex_formatter.py
index aa4ac3bb..05f9facf 100644
--- a/tests/test_latex_formatter.py
+++ b/tests/test_latex_formatter.py
@@ -12,13 +12,15 @@ from __future__ import print_function
import os
import unittest
import tempfile
+from os import path
+
+import pytest
from pygments.formatters import LatexFormatter
from pygments.lexers import PythonLexer
-import support
-
-TESTFILE, TESTDIR = support.location(__file__)
+TESTDIR = path.dirname(path.abspath(__file__))
+TESTFILE = path.join(TESTDIR, 'test_latex_formatter.py')
class LatexFormatterTest(unittest.TestCase):
@@ -44,7 +46,7 @@ class LatexFormatterTest(unittest.TestCase):
po.stdout.close()
except OSError as e:
# latex not available
- raise support.SkipTest(e)
+ pytest.skip(str(e))
else:
if ret:
print(output)
diff --git a/tests/test_modeline.py b/tests/test_modeline.py
index 6e1f16a4..b1206949 100644
--- a/tests/test_modeline.py
+++ b/tests/test_modeline.py
@@ -12,10 +12,7 @@ from __future__ import print_function
from pygments import modeline
-def test_lexer_classes():
- def verify(buf):
- assert modeline.get_filetype_from_buffer(buf) == 'python'
-
+def test_modelines():
for buf in [
'vi: ft=python' + '\n' * 8,
'vi: ft=python' + '\n' * 8,
@@ -23,4 +20,4 @@ def test_lexer_classes():
'\n' * 8 + 'ex: filetype=python',
'\n' * 8 + 'vim: some,other,syn=python\n\n\n\n'
]:
- yield verify, buf
+ assert modeline.get_filetype_from_buffer(buf) == 'python'
diff --git a/tests/test_regexlexer.py b/tests/test_regexlexer.py
index adc05a93..7653f7f9 100644
--- a/tests/test_regexlexer.py
+++ b/tests/test_regexlexer.py
@@ -14,7 +14,7 @@ from pygments.lexer import RegexLexer
from pygments.lexer import default
-class TestLexer(RegexLexer):
+class MyLexer(RegexLexer):
"""Test tuple state transitions including #pop."""
tokens = {
'root': [
@@ -36,31 +36,31 @@ class TestLexer(RegexLexer):
class TupleTransTest(unittest.TestCase):
def test(self):
- lx = TestLexer()
+ lx = MyLexer()
toks = list(lx.get_tokens_unprocessed('abcde'))
self.assertEqual(toks, [
(0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'),
(3, Text.Beer, 'd'), (4, Text.Root, 'e')])
def test_multiline(self):
- lx = TestLexer()
+ lx = MyLexer()
toks = list(lx.get_tokens_unprocessed('a\ne'))
self.assertEqual(toks, [
(0, Text.Root, 'a'), (1, Text, u'\n'), (2, Text.Root, 'e')])
def test_default(self):
- lx = TestLexer()
+ lx = MyLexer()
toks = list(lx.get_tokens_unprocessed('d'))
self.assertEqual(toks, [(0, Text.Beer, 'd')])
class PopEmptyTest(unittest.TestCase):
def test_regular(self):
- lx = TestLexer()
+ lx = MyLexer()
toks = list(lx.get_tokens_unprocessed('#e'))
self.assertEqual(toks, [(0, Text.Root, '#'), (1, Text.Root, 'e')])
def test_tuple(self):
- lx = TestLexer()
+ lx = MyLexer()
toks = list(lx.get_tokens_unprocessed('@e'))
self.assertEqual(toks, [(0, Text.Root, '@'), (1, Text.Root, 'e')])
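
The TestLexer -> MyLexer rename here (and in test_using_api.py below) is presumably needed because pytest collects any class whose name starts with Test, so a RegexLexer subclass named TestLexer would itself be picked up for collection. An alternative sketch, not what this commit does, is to opt the helper out explicitly:

    from pygments.lexer import RegexLexer

    class TestLexer(RegexLexer):
        __test__ = False  # pytest honors this flag and skips collection
        tokens = {'root': []}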
diff --git a/tests/test_rtf_formatter.py b/tests/test_rtf_formatter.py
index 80ce01f5..90e24fbe 100644
--- a/tests/test_rtf_formatter.py
+++ b/tests/test_rtf_formatter.py
@@ -8,13 +8,13 @@
"""
import unittest
-from string_asserts import StringTests
from pygments.util import StringIO
from pygments.formatters import RtfFormatter
from pygments.lexers.special import TextLexer
-class RtfFormatterTest(StringTests, unittest.TestCase):
+
+class RtfFormatterTest(unittest.TestCase):
foot = (r'\par' '\n' r'}')
def _escape(self, string):
@@ -57,7 +57,7 @@ class RtfFormatterTest(StringTests, unittest.TestCase):
u"\t(WARNING: Partial Output of Result!)".format(
expected = expected,
result = result[:len(expected)]))
- self.assertStartsWith(result, expected, msg)
+ self.assertTrue(result.startswith(expected), msg)
def test_rtf_footer(self):
t = u''
@@ -68,7 +68,7 @@ class RtfFormatterTest(StringTests, unittest.TestCase):
u"\t(WARNING: Partial Output of Result!)".format(
expected = self._escape(expected),
result = self._escape(result[-len(expected):])))
- self.assertEndsWith(result, expected, msg)
+ self.assertTrue(result.endswith(expected), msg)
def test_ascii_characters(self):
t = u'a b c d ~'
@@ -77,16 +77,16 @@ class RtfFormatterTest(StringTests, unittest.TestCase):
if not result.endswith(self.foot):
return(unittest.skip('RTF Footer incorrect'))
msg = self._build_message(t=t, result=result, expected=expected)
- self.assertEndsWith(result, expected+self.foot, msg)
+ self.assertTrue(result.endswith(expected+self.foot), msg)
def test_escape_characters(self):
t = u'\\ {{'
result = self.format_rtf(t)
- expected = (r'\\ \{\{')
+ expected = r'\\ \{\{'
if not result.endswith(self.foot):
return(unittest.skip('RTF Footer incorrect'))
msg = self._build_message(t=t, result=result, expected=expected)
- self.assertEndsWith(result, expected+self.foot, msg)
+ self.assertTrue(result.endswith(expected+self.foot), msg)
def test_single_characters(self):
t = u'â € ¤ каждой'
@@ -96,7 +96,7 @@ class RtfFormatterTest(StringTests, unittest.TestCase):
if not result.endswith(self.foot):
return(unittest.skip('RTF Footer incorrect'))
msg = self._build_message(t=t, result=result, expected=expected)
- self.assertEndsWith(result, expected+self.foot, msg)
+ self.assertTrue(result.endswith(expected+self.foot), msg)
def test_double_characters(self):
t = u'က 힣 ↕ ↕︎ 鼖'
@@ -106,4 +106,4 @@ class RtfFormatterTest(StringTests, unittest.TestCase):
if not result.endswith(self.foot):
return(unittest.skip('RTF Footer incorrect'))
msg = self._build_message(t=t, result=result, expected=expected)
- self.assertEndsWith(result, expected+self.foot, msg)
+ self.assertTrue(result.endswith(expected+self.foot), msg)
diff --git a/tests/test_string_asserts.py b/tests/test_string_asserts.py
deleted file mode 100644
index 737ba200..00000000
--- a/tests/test_string_asserts.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- Pygments string assert utility tests
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import unittest
-from string_asserts import StringTests
-
-class TestStringTests(StringTests, unittest.TestCase):
-
- def test_startswith_correct(self):
- self.assertStartsWith("AAA", "A")
-
- # @unittest.expectedFailure not supported by nose
- def test_startswith_incorrect(self):
- self.assertRaises(AssertionError, self.assertStartsWith, "AAA", "B")
-
- # @unittest.expectedFailure not supported by nose
- def test_startswith_short(self):
- self.assertRaises(AssertionError, self.assertStartsWith, "A", "AA")
-
- def test_endswith_correct(self):
- self.assertEndsWith("AAA", "A")
-
- # @unittest.expectedFailure not supported by nose
- def test_endswith_incorrect(self):
- self.assertRaises(AssertionError, self.assertEndsWith, "AAA", "B")
-
- # @unittest.expectedFailure not supported by nose
- def test_endswith_short(self):
- self.assertRaises(AssertionError, self.assertEndsWith, "A", "AA")
diff --git a/tests/test_using_api.py b/tests/test_using_api.py
index 2ab70d09..3fdee591 100644
--- a/tests/test_using_api.py
+++ b/tests/test_using_api.py
@@ -12,7 +12,8 @@ import unittest
from pygments.lexer import using, bygroups, this, RegexLexer
from pygments.token import String, Text, Keyword
-class TestLexer(RegexLexer):
+
+class MyLexer(RegexLexer):
tokens = {
'root': [
(r'#.*',
@@ -31,10 +32,10 @@ class UsingStateTest(unittest.TestCase):
def test_basic(self):
expected = [(Text, 'a'), (String, '"'), (Keyword, 'bcd'),
(String, '"'), (Text, 'e\n')]
- t = list(TestLexer().get_tokens('a"bcd"e'))
+ t = list(MyLexer().get_tokens('a"bcd"e'))
self.assertEqual(t, expected)
def test_error(self):
def gen():
- return list(TestLexer().get_tokens('#a'))
+ return list(MyLexer().get_tokens('#a'))
self.assertRaises(KeyError, gen)