# -*- coding: utf-8 -*-
"""
Pygments basic API tests
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function

import random
import unittest

from pygments import lexers, formatters, lex, format
from pygments.token import _TokenType, Text
from pygments.lexer import RegexLexer
from pygments.formatters.img import FontNotFound
from pygments.util import text_type, StringIO, BytesIO, xrange, ClassNotFound

import support

TESTFILE, TESTDIR = support.location(__file__)
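
# Roundtrip fodder: five shuffled copies of ASCII codepoints 33-127, joined
# into one newline-terminated string and fed to every lexer below.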
test_content = [chr(i) for i in xrange(33, 128)] * 5
random.shuffle(test_content)
test_content = ''.join(test_content) + '\n'


def test_lexer_instantiate_all():
    # instantiate every lexer, to see if the token type defs are correct
    def verify(name):
        getattr(lexers, name)

    for x in lexers.LEXERS:
        yield verify, x


def test_lexer_classes():
    # test that every lexer class has the correct public API
    def verify(cls):
        assert type(cls.name) is str
        for attr in 'aliases', 'filenames', 'alias_filenames', 'mimetypes':
            assert hasattr(cls, attr)
            assert type(getattr(cls, attr)) is list, \
                "%s: %s attribute wrong" % (cls, attr)
        result = cls.analyse_text("abc")
        assert isinstance(result, float) and 0.0 <= result <= 1.0
        result = cls.analyse_text(".abc")
        assert isinstance(result, float) and 0.0 <= result <= 1.0

        assert all(al.lower() == al for al in cls.aliases)
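
        # lexers accept arbitrary keyword options; unrecognized options are
        # simply stored on the instance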
        inst = cls(opt1="val1", opt2="val2")
        if issubclass(cls, RegexLexer):
            if not hasattr(cls, '_tokens'):
                # if there's no "_tokens", the lexer has to be one with
                # multiple tokendef variants
                assert cls.token_variants
                for variant in cls.tokens:
                    assert 'root' in cls.tokens[variant]
            else:
                assert 'root' in cls._tokens, \
                    '%s has no root state' % cls

        if cls.name in ['XQuery', 'Opa']:  # XXX temporary
            return

        try:
            tokens = list(inst.get_tokens(test_content))
        except KeyboardInterrupt:
            raise KeyboardInterrupt(
                'interrupted %s.get_tokens(): test_content=%r' %
                (cls.__name__, test_content))
        txt = ""
        for token in tokens:
            assert isinstance(token, tuple)
            assert isinstance(token[0], _TokenType)
            assert isinstance(token[1], text_type)
            txt += token[1]
        assert txt == test_content, "%s lexer roundtrip failed: %r != %r" % \
            (cls.name, test_content, txt)

    for lexer in lexers._iter_lexerclasses(plugins=False):
        yield verify, lexer


def test_lexer_options():
    # test that the basic options work
    def ensure(tokens, output):
        concatenated = ''.join(token[1] for token in tokens)
        assert concatenated == output, \
            '%s: %r != %r' % (lexer, concatenated, output)

    def verify(cls):
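        # stripnl=False: leading and trailing newlines in the input are kept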
        inst = cls(stripnl=False)
        ensure(inst.get_tokens('a\nb'), 'a\nb\n')
        ensure(inst.get_tokens('\n\n\n'), '\n\n\n')
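        # stripall=True: all leading and trailing whitespace is stripped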
        inst = cls(stripall=True)
        ensure(inst.get_tokens(' \n b\n\n\n'), 'b\n')

        # some lexers require full lines in input
        if ('ConsoleLexer' not in cls.__name__ and
                'SessionLexer' not in cls.__name__ and
                not cls.__name__.startswith('Literate') and
                cls.__name__ not in ('ErlangShellLexer', 'RobotFrameworkLexer')):
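            # ensurenl=False: no trailing newline is appended to the input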
            inst = cls(ensurenl=False)
            ensure(inst.get_tokens('a\nb'), 'a\nb')
            inst = cls(ensurenl=False, stripall=True)
            ensure(inst.get_tokens('a\nb\n\n'), 'a\nb')

    for lexer in lexers._iter_lexerclasses(plugins=False):
        if lexer.__name__ == 'RawTokenLexer':
            # this one is special
            continue
        yield verify, lexer


def test_get_lexers():
    # test that the lexers functions work
    def verify(func, args):
        x = func(opt='val', *args)
        assert isinstance(x, lexers.PythonLexer)
        assert x.options["opt"] == "val"

    for func, args in [(lexers.get_lexer_by_name, ("python",)),
                       (lexers.get_lexer_for_filename, ("test.py",)),
                       (lexers.get_lexer_for_mimetype, ("text/x-python",)),
                       (lexers.guess_lexer, ("#!/usr/bin/python -O\nprint",)),
                       (lexers.guess_lexer_for_filename, ("a.py", "<%= @foo %>"))
                       ]:
        yield verify, func, args
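
    # every lexer in the registry must be findable by its class name, by each
    # of its aliases, and by each of its mimetypes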
    for cls, (_, lname, aliases, _, mimetypes) in lexers.LEXERS.items():
        assert cls == lexers.find_lexer_class(lname).__name__

        for alias in aliases:
            assert cls == lexers.get_lexer_by_name(alias).__class__.__name__

        for mimetype in mimetypes:
            assert cls == lexers.get_lexer_for_mimetype(mimetype).__class__.__name__

    try:
        lexers.get_lexer_by_name(None)
    except ClassNotFound:
        pass
    else:
        raise Exception


def test_formatter_public_api():
    # test that every formatter class has the correct public API
    ts = list(lexers.PythonLexer().get_tokens("def f(): pass"))
    string_out = StringIO()
    bytes_out = BytesIO()

    def verify(formatter):
        info = formatters.FORMATTERS[formatter.__name__]
        assert len(info) == 5
        assert info[1], "missing formatter name"
        assert info[2], "missing formatter aliases"
        assert info[4], "missing formatter docstring"

        try:
            inst = formatter(opt1="val1")
        except (ImportError, FontNotFound):
            raise support.SkipTest

        try:
            inst.get_style_defs()
        except NotImplementedError:
            # may be raised by formatters for which it doesn't make sense
            pass
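
        # unicodeoutput formatters write text streams; the others write bytes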
        if formatter.unicodeoutput:
            inst.format(ts, string_out)
        else:
            inst.format(ts, bytes_out)

    for name in formatters.FORMATTERS:
        formatter = getattr(formatters, name)
        yield verify, formatter


def test_formatter_encodings():
    from pygments.formatters import HtmlFormatter

    # unicode output
    fmt = HtmlFormatter()
    tokens = [(Text, u"ä")]
    out = format(tokens, fmt)
    assert type(out) is text_type
    assert u"ä" in out

    # encoding option
    fmt = HtmlFormatter(encoding="latin1")
    tokens = [(Text, u"ä")]
    assert u"ä".encode("latin1") in format(tokens, fmt)

    # encoding and outencoding option; outencoding overrides encoding on output
    fmt = HtmlFormatter(encoding="latin1", outencoding="utf8")
    tokens = [(Text, u"ä")]
    assert u"ä".encode("utf8") in format(tokens, fmt)


def test_formatter_unicode_handling():
    # test that the formatter supports encoding and Unicode
    tokens = list(lexers.PythonLexer(encoding='utf-8').
                  get_tokens("def f(): 'ä'"))

    def verify(formatter):
        try:
            inst = formatter(encoding=None)
        except (ImportError, FontNotFound):
            # some dependency or font not installed
            raise support.SkipTest

        if formatter.name != 'Raw tokens':
            out = format(tokens, inst)
            if formatter.unicodeoutput:
                assert type(out) is text_type, '%s: %r' % (formatter, out)

            inst = formatter(encoding='utf-8')
            out = format(tokens, inst)
            assert type(out) is bytes, '%s: %r' % (formatter, out)
            # Cannot test for encoding, since formatters may have to escape
            # non-ASCII characters.
        else:
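            # the 'Raw tokens' formatter produces bytes regardless of encoding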
            inst = formatter()
            out = format(tokens, inst)
            assert type(out) is bytes, '%s: %r' % (formatter, out)

    for formatter, info in formatters.FORMATTERS.items():
        # this tests the automatic importing as well
        fmter = getattr(formatters, formatter)
        yield verify, fmter


def test_get_formatters():
    # test that the formatters functions work
    x = formatters.get_formatter_by_name("html", opt="val")
    assert isinstance(x, formatters.HtmlFormatter)
    assert x.options["opt"] == "val"

    x = formatters.get_formatter_for_filename("a.html", opt="val")
    assert isinstance(x, formatters.HtmlFormatter)
    assert x.options["opt"] == "val"


def test_styles():
    # minimal style test
    from pygments.formatters import HtmlFormatter
    HtmlFormatter(style="pastie")


def test_bare_class_handler():
    from pygments.formatters import HtmlFormatter
    from pygments.lexers import PythonLexer
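
    # passing a lexer/formatter class instead of an instance must raise
    # TypeError with a helpful message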
    try:
        lex('test\n', PythonLexer)
    except TypeError as e:
        assert 'lex() argument must be a lexer instance' in str(e)
    else:
        assert False, 'nothing raised'
    try:
        format([], HtmlFormatter)
    except TypeError as e:
        assert 'format() argument must be a formatter instance' in str(e)
    else:
        assert False, 'nothing raised'


class FiltersTest(unittest.TestCase):

    def test_basic(self):
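        # instantiate each filter by name with sample options; every filter
        # must emit Unicode tokens, and all but a few must leave the text
        # unchanged on roundtrip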
        filters_args = [
            ('whitespace', {'spaces': True, 'tabs': True, 'newlines': True}),
            ('whitespace', {'wstokentype': False, 'spaces': True}),
            ('highlight', {'names': ['isinstance', 'lexers', 'x']}),
            ('codetagify', {'codetags': 'API'}),
            ('keywordcase', {'case': 'capitalize'}),
            ('raiseonerror', {}),
            ('gobble', {'n': 4}),
            ('tokenmerge', {}),
        ]
        for x, args in filters_args:
            lx = lexers.PythonLexer()
            lx.add_filter(x, **args)
            with open(TESTFILE, 'rb') as fp:
                text = fp.read().decode('utf-8')
            tokens = list(lx.get_tokens(text))
            self.assertTrue(all(isinstance(t[1], text_type)
                                for t in tokens),
                            '%s filter did not return Unicode' % x)
            roundtext = ''.join([t[1] for t in tokens])
            if x not in ('whitespace', 'keywordcase', 'gobble'):
                # these filters change the text
                self.assertEqual(roundtext, text,
                                 "lexer roundtrip with %s filter failed" % x)

    def test_raiseonerror(self):
        lx = lexers.PythonLexer()
        lx.add_filter('raiseonerror', excclass=RuntimeError)
        self.assertRaises(RuntimeError, list, lx.get_tokens('$'))

    def test_whitespace(self):
        lx = lexers.PythonLexer()
        lx.add_filter('whitespace', spaces='%')
        with open(TESTFILE, 'rb') as fp:
            text = fp.read().decode('utf-8')
        lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))])
        self.assertFalse(' ' in lxtext)

    def test_keywordcase(self):
        lx = lexers.PythonLexer()
        lx.add_filter('keywordcase', case='capitalize')
        with open(TESTFILE, 'rb') as fp:
            text = fp.read().decode('utf-8')
        lxtext = ''.join([t[1] for t in list(lx.get_tokens(text))])
        self.assertTrue('Def' in lxtext and 'Class' in lxtext)

    def test_codetag(self):
        lx = lexers.PythonLexer()
        lx.add_filter('codetagify')
        text = u'# BUG: text'
        tokens = list(lx.get_tokens(text))
        self.assertEqual('# ', tokens[0][1])
        self.assertEqual('BUG', tokens[1][1])

    def test_codetag_boundary(self):
        # ticket #368: the 'BUG' inside 'DEBUG' must not be tagged
        lx = lexers.PythonLexer()
        lx.add_filter('codetagify')
        text = u'# DEBUG: text'
        tokens = list(lx.get_tokens(text))
        self.assertEqual('# DEBUG: text', tokens[0][1])