summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGeorg Brandl <georg@python.org>2020-01-04 16:10:50 +0100
committerGeorg Brandl <georg@python.org>2020-01-04 16:10:50 +0100
commit58c34950dec138f4e3e52a56031b9a817fb54295 (patch)
tree4a1f10163e09d2e6bc3f858bcd23f14dabc69304
parentc74e89cd2a11c77938010c17cfc8579a0c5f40fb (diff)
downloadpygments-git-58c34950dec138f4e3e52a56031b9a817fb54295.tar.gz
Remove Python 2 compatibility
* remove 2/3 shims in pygments.util * update setup.py metadata
-rw-r--r--.github/workflows/build.yaml2
-rw-r--r--CHANGES2
-rw-r--r--pygments/__init__.py3
-rw-r--r--pygments/cmdline.py27
-rw-r--r--pygments/filters/__init__.py9
-rw-r--r--pygments/formatter.py4
-rw-r--r--pygments/formatters/__init__.py8
-rw-r--r--pygments/formatters/html.py10
-rw-r--r--pygments/formatters/img.py4
-rw-r--r--pygments/formatters/latex.py9
-rw-r--r--pygments/formatters/terminal.py6
-rw-r--r--pygments/formatters/terminal256.py6
-rw-r--r--pygments/lexer.py15
-rw-r--r--pygments/lexers/__init__.py18
-rw-r--r--pygments/lexers/_lua_builtins.py2
-rw-r--r--pygments/lexers/_scilab_builtins.py2
-rw-r--r--pygments/lexers/css.py5
-rw-r--r--pygments/lexers/dotnet.py6
-rw-r--r--pygments/lexers/javascript.py6
-rw-r--r--pygments/lexers/php.py5
-rw-r--r--pygments/lexers/robotframework.py3
-rw-r--r--pygments/lexers/scripting.py4
-rw-r--r--pygments/lexers/special.py5
-rw-r--r--pygments/lexers/sql.py5
-rw-r--r--pygments/style.py4
-rw-r--r--pygments/unistring.py4
-rw-r--r--pygments/util.py65
-rwxr-xr-xsetup.py4
-rw-r--r--tests/test_basic_api.py13
-rw-r--r--tests/test_cmdline.py19
-rw-r--r--tests/test_html_formatter.py2
-rw-r--r--tests/test_irc_formatter.py3
-rw-r--r--tests/test_rtf_formatter.py3
-rw-r--r--tests/test_terminal_formatter.py2
-rw-r--r--tests/test_unistring.py3
-rw-r--r--tests/test_util.py11
36 files changed, 107 insertions, 192 deletions
diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index a4c90115..f95482c7 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -9,7 +9,7 @@ jobs:
strategy:
max-parallel: 4
matrix:
- python-version: [2.7, 3.5, 3.6, 3.7, 3.8, pypy2, pypy3]
+ python-version: [3.5, 3.6, 3.7, 3.8, pypy3]
steps:
- uses: actions/checkout@v1
diff --git a/CHANGES b/CHANGES
index 049fe4c0..3c7785ae 100644
--- a/CHANGES
+++ b/CHANGES
@@ -11,6 +11,8 @@ Version 2.6
-----------
(not released yet)
+- Python 2 is no longer supported.
+
- Added lexers:
* Linux kernel logs (PR#1310)
diff --git a/pygments/__init__.py b/pygments/__init__.py
index 89efc350..e253503e 100644
--- a/pygments/__init__.py
+++ b/pygments/__init__.py
@@ -26,8 +26,7 @@
:license: BSD, see LICENSE for details.
"""
import sys
-
-from pygments.util import StringIO, BytesIO
+from io import StringIO, BytesIO
__version__ = '2.5.2'
__docformat__ = 'restructuredtext'
diff --git a/pygments/cmdline.py b/pygments/cmdline.py
index 34752d66..ea9f1e2f 100644
--- a/pygments/cmdline.py
+++ b/pygments/cmdline.py
@@ -18,7 +18,8 @@ from textwrap import dedent
from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline, \
- guess_decode, guess_decode_from_terminal, terminal_encoding
+ guess_decode, guess_decode_from_terminal, terminal_encoding, \
+ UnclosingTextIOWrapper
from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename
from pygments.lexers.special import TextLexer
@@ -397,11 +398,7 @@ def main_inner(popts, args, usage):
elif '-s' not in opts: # treat stdin as full file (-s support is later)
# read code from terminal, always in binary mode since we want to
# decode ourselves and be tolerant with it
- if sys.version_info > (3,):
- # Python 3: we have to use .buffer to get a binary stream
- code = sys.stdin.buffer.read()
- else:
- code = sys.stdin.read()
+ code = sys.stdin.buffer.read() # use .buffer to get a binary stream
if not inencoding:
code, inencoding = guess_decode_from_terminal(code, sys.stdin)
# else the lexer will do the decoding
@@ -466,11 +463,7 @@ def main_inner(popts, args, usage):
fmter = Terminal256Formatter(**parsed_opts)
else:
fmter = TerminalFormatter(**parsed_opts)
- if sys.version_info > (3,):
- # Python 3: we have to use .buffer to get a binary stream
- outfile = sys.stdout.buffer
- else:
- outfile = sys.stdout
+ outfile = sys.stdout.buffer
# determine output encoding if not explicitly selected
if not outencoding:
@@ -485,10 +478,8 @@ def main_inner(popts, args, usage):
if not outfn and sys.platform in ('win32', 'cygwin') and \
fmter.name in ('Terminal', 'Terminal256'): # pragma: no cover
# unfortunately colorama doesn't support binary streams on Py3
- if sys.version_info > (3,):
- from pygments.util import UnclosingTextIOWrapper
- outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding)
- fmter.encoding = None
+ outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding)
+ fmter.encoding = None
try:
import colorama.initialise
except ImportError:
@@ -515,11 +506,7 @@ def main_inner(popts, args, usage):
# line by line processing of stdin (eg: for 'tail -f')...
try:
while 1:
- if sys.version_info > (3,):
- # Python 3: we have to use .buffer to get a binary stream
- line = sys.stdin.buffer.readline()
- else:
- line = sys.stdin.readline()
+ line = sys.stdin.buffer.readline()
if not line:
break
if not inencoding:
diff --git a/pygments/filters/__init__.py b/pygments/filters/__init__.py
index 3fe6caa7..3fd2fee0 100644
--- a/pygments/filters/__init__.py
+++ b/pygments/filters/__init__.py
@@ -16,7 +16,7 @@ from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
string_to_tokentype
from pygments.filter import Filter
from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
- get_choice_opt, ClassNotFound, OptionError, text_type, string_types
+ get_choice_opt, ClassNotFound, OptionError
from pygments.plugin import find_plugin_filters
@@ -113,7 +113,7 @@ class KeywordCaseFilter(Filter):
Filter.__init__(self, **options)
case = get_choice_opt(options, 'case',
['lower', 'upper', 'capitalize'], 'lower')
- self.convert = getattr(text_type, case)
+ self.convert = getattr(str, case)
def filter(self, lexer, stream):
for ttype, value in stream:
@@ -233,7 +233,7 @@ class VisibleWhitespaceFilter(Filter):
('tabs', u'»'),
('newlines', u'¶')]:
opt = options.get(name, False)
- if isinstance(opt, string_types) and len(opt) == 1:
+ if isinstance(opt, str) and len(opt) == 1:
setattr(self, name, opt)
else:
setattr(self, name, (opt and default or ''))
@@ -250,6 +250,7 @@ class VisibleWhitespaceFilter(Filter):
tabs = self.tabs or u'\t'
newlines = self.newlines or u'\n'
regex = re.compile(r'\s')
+
def replacefunc(wschar):
if wschar == ' ':
return spaces
@@ -302,7 +303,7 @@ class GobbleFilter(Filter):
def filter(self, lexer, stream):
n = self.n
- left = n # How many characters left to gobble.
+ left = n # How many characters left to gobble.
for ttype, value in stream:
# Remove ``left`` tokens from first line, ``n`` from all others.
parts = value.split('\n')
diff --git a/pygments/formatter.py b/pygments/formatter.py
index f09f6e3c..efa90e51 100644
--- a/pygments/formatter.py
+++ b/pygments/formatter.py
@@ -11,14 +11,14 @@
import codecs
-from pygments.util import get_bool_opt, string_types
+from pygments.util import get_bool_opt
from pygments.styles import get_style_by_name
__all__ = ['Formatter']
def _lookup_style(style):
- if isinstance(style, string_types):
+ if isinstance(style, str):
return get_style_by_name(style)
return style
diff --git a/pygments/formatters/__init__.py b/pygments/formatters/__init__.py
index 6f1130a8..9ff30643 100644
--- a/pygments/formatters/__init__.py
+++ b/pygments/formatters/__init__.py
@@ -17,7 +17,7 @@ from os.path import basename
from pygments.formatters._mapping import FORMATTERS
from pygments.plugin import find_plugin_formatters
-from pygments.util import ClassNotFound, itervalues
+from pygments.util import ClassNotFound
__all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS)
@@ -45,7 +45,7 @@ def _load_formatters(module_name):
def get_all_formatters():
"""Return a generator for all formatter classes."""
# NB: this returns formatter classes, not info like get_all_lexers().
- for info in itervalues(FORMATTERS):
+ for info in FORMATTERS.values():
if info[1] not in _formatter_cache:
_load_formatters(info[0])
yield _formatter_cache[info[1]]
@@ -58,7 +58,7 @@ def find_formatter_class(alias):
Returns None if not found.
"""
- for module_name, name, aliases, _, _ in itervalues(FORMATTERS):
+ for module_name, name, aliases, _, _ in FORMATTERS.values():
if alias in aliases:
if name not in _formatter_cache:
_load_formatters(module_name)
@@ -121,7 +121,7 @@ def get_formatter_for_filename(fn, **options):
Raises ClassNotFound if not found.
"""
fn = basename(fn)
- for modname, name, _, filenames, _ in itervalues(FORMATTERS):
+ for modname, name, _, filenames, _ in FORMATTERS.values():
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _formatter_cache:
diff --git a/pygments/formatters/html.py b/pygments/formatters/html.py
index be9d8534..530dede9 100644
--- a/pygments/formatters/html.py
+++ b/pygments/formatters/html.py
@@ -14,11 +14,11 @@ from __future__ import print_function
import os
import sys
import os.path
+from io import StringIO
from pygments.formatter import Formatter
from pygments.token import Token, Text, STANDARD_TYPES
-from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
- StringIO, string_types, iteritems
+from pygments.util import get_bool_opt, get_int_opt, get_list_opt
try:
import ctags
@@ -41,12 +41,14 @@ def escape_html(text, table=_escape_html_table):
"""Escape &, <, > as well as single and double quotes for HTML."""
return text.translate(table)
+
def webify(color):
if color.startswith('calc') or color.startswith('var'):
return color
else:
return '#' + color
+
def _get_ttype_class(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
@@ -497,7 +499,7 @@ class HtmlFormatter(Formatter):
"""
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
- if isinstance(arg, string_types):
+ if isinstance(arg, str):
args = [arg]
else:
args = list(arg)
@@ -511,7 +513,7 @@ class HtmlFormatter(Formatter):
return ', '.join(tmp)
styles = [(level, ttype, cls, style)
- for cls, (style, ttype, level) in iteritems(self.class2style)
+ for cls, (style, ttype, level) in self.class2style.items()
if cls and style]
styles.sort()
lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
diff --git a/pygments/formatters/img.py b/pygments/formatters/img.py
index 6bb33644..5f0dcc56 100644
--- a/pygments/formatters/img.py
+++ b/pygments/formatters/img.py
@@ -14,7 +14,7 @@ import sys
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
- get_choice_opt, xrange
+ get_choice_opt
import subprocess
@@ -504,7 +504,7 @@ class ImageFormatter(Formatter):
"""
if not self.line_numbers:
return
- for p in xrange(self.maxlineno):
+ for p in range(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
diff --git a/pygments/formatters/latex.py b/pygments/formatters/latex.py
index 7f6aa9e3..17c3fe11 100644
--- a/pygments/formatters/latex.py
+++ b/pygments/formatters/latex.py
@@ -11,11 +11,12 @@
from __future__ import division
+from io import StringIO
+
from pygments.formatter import Formatter
from pygments.lexer import Lexer
from pygments.token import Token, STANDARD_TYPES
-from pygments.util import get_bool_opt, get_int_opt, StringIO, xrange, \
- iteritems
+from pygments.util import get_bool_opt, get_int_opt
__all__ = ['LatexFormatter']
@@ -322,7 +323,7 @@ class LatexFormatter(Formatter):
"""
cp = self.commandprefix
styles = []
- for name, definition in iteritems(self.cmd2def):
+ for name, definition in self.cmd2def.items():
styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
(cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
@@ -354,7 +355,7 @@ class LatexFormatter(Formatter):
if self.texcomments:
# Try to guess comment starting lexeme and escape it ...
start = value[0:1]
- for i in xrange(1, len(value)):
+ for i in range(1, len(value)):
if start[0] != value[i]:
break
start += value[i]
diff --git a/pygments/formatters/terminal.py b/pygments/formatters/terminal.py
index e60bde91..f0f3d7ae 100644
--- a/pygments/formatters/terminal.py
+++ b/pygments/formatters/terminal.py
@@ -92,12 +92,6 @@ class TerminalFormatter(Formatter):
self._lineno = 0
def format(self, tokensource, outfile):
- # hack: if the output is a terminal and has an encoding set,
- # use that to avoid unicode encode problems
- if not self.encoding and hasattr(outfile, "encoding") and \
- hasattr(outfile, "isatty") and outfile.isatty() and \
- sys.version_info < (3,):
- self.encoding = outfile.encoding
return Formatter.format(self, tokensource, outfile)
def _write_lineno(self, outfile):
diff --git a/pygments/formatters/terminal256.py b/pygments/formatters/terminal256.py
index b6bf9376..30e6e1e2 100644
--- a/pygments/formatters/terminal256.py
+++ b/pygments/formatters/terminal256.py
@@ -239,12 +239,6 @@ class Terminal256Formatter(Formatter):
escape.reset_string())
def format(self, tokensource, outfile):
- # hack: if the output is a terminal and has an encoding set,
- # use that to avoid unicode encode problems
- if not self.encoding and hasattr(outfile, "encoding") and \
- hasattr(outfile, "isatty") and outfile.isatty() and \
- sys.version_info < (3,):
- self.encoding = outfile.encoding
return Formatter.format(self, tokensource, outfile)
def format_unencoded(self, tokensource, outfile):
diff --git a/pygments/lexer.py b/pygments/lexer.py
index 56a7e1e8..5614a7d4 100644
--- a/pygments/lexer.py
+++ b/pygments/lexer.py
@@ -19,7 +19,7 @@ from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
- make_analysator, text_type, add_metaclass, iteritems, Future, guess_decode
+ make_analysator, Future, guess_decode
from pygments.regexopt import regex_opt
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
@@ -48,8 +48,7 @@ class LexerMeta(type):
return type.__new__(mcs, name, bases, d)
-@add_metaclass(LexerMeta)
-class Lexer(object):
+class Lexer(metaclass=LexerMeta):
"""
Lexer for a specific language.
@@ -145,7 +144,7 @@ class Lexer(object):
Also preprocess the text, i.e. expand tabs and strip it if
wanted and applies registered filters.
"""
- if not isinstance(text, text_type):
+ if not isinstance(text, str):
if self.encoding == 'guess':
text, _ = guess_decode(text)
elif self.encoding == 'chardet':
@@ -536,7 +535,7 @@ class RegexLexerMeta(LexerMeta):
for c in cls.__mro__:
toks = c.__dict__.get('tokens', {})
- for state, items in iteritems(toks):
+ for state, items in toks.items():
curitems = tokens.get(state)
if curitems is None:
# N.b. because this is assigned by reference, sufficiently
@@ -582,8 +581,7 @@ class RegexLexerMeta(LexerMeta):
return type.__call__(cls, *args, **kwds)
-@add_metaclass(RegexLexerMeta)
-class RegexLexer(Lexer):
+class RegexLexer(Lexer, metaclass=RegexLexerMeta):
"""
Base for simple stateful regular expression-based lexers.
Simplifies the lexing process so that you need only
@@ -850,8 +848,7 @@ class ProfilingRegexLexerMeta(RegexLexerMeta):
return match_func
-@add_metaclass(ProfilingRegexLexerMeta)
-class ProfilingRegexLexer(RegexLexer):
+class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
"""Drop-in replacement for RegexLexer that does profiling of its regexes."""
_prof_data = []
diff --git a/pygments/lexers/__init__.py b/pygments/lexers/__init__.py
index 100d8076..a5691171 100644
--- a/pygments/lexers/__init__.py
+++ b/pygments/lexers/__init__.py
@@ -18,7 +18,7 @@ from os.path import basename
from pygments.lexers._mapping import LEXERS
from pygments.modeline import get_filetype_from_buffer
from pygments.plugin import find_plugin_lexers
-from pygments.util import ClassNotFound, itervalues, guess_decode, text_type
+from pygments.util import ClassNotFound, guess_decode
COMPAT = {
'Python3Lexer': 'PythonLexer',
@@ -52,7 +52,7 @@ def get_all_lexers():
"""Return a generator of tuples in the form ``(name, aliases,
filenames, mimetypes)`` of all known lexers.
"""
- for item in itervalues(LEXERS):
+ for item in LEXERS.values():
yield item[1:]
for lexer in find_plugin_lexers():
yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
@@ -66,7 +66,7 @@ def find_lexer_class(name):
if name in _lexer_cache:
return _lexer_cache[name]
# lookup builtin lexers
- for module_name, lname, aliases, _, _ in itervalues(LEXERS):
+ for module_name, lname, aliases, _, _ in LEXERS.values():
if name == lname:
_load_lexers(module_name)
return _lexer_cache[name]
@@ -86,7 +86,7 @@ def find_lexer_class_by_name(_alias):
if not _alias:
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
- for module_name, name, aliases, _, _ in itervalues(LEXERS):
+ for module_name, name, aliases, _, _ in LEXERS.values():
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
@@ -107,7 +107,7 @@ def get_lexer_by_name(_alias, **options):
raise ClassNotFound('no lexer for alias %r found' % _alias)
# lookup builtin lexers
- for module_name, name, aliases, _, _ in itervalues(LEXERS):
+ for module_name, name, aliases, _, _ in LEXERS.values():
if _alias.lower() in aliases:
if name not in _lexer_cache:
_load_lexers(module_name)
@@ -164,7 +164,7 @@ def find_lexer_class_for_filename(_fn, code=None):
"""
matches = []
fn = basename(_fn)
- for modname, name, _, filenames, _ in itervalues(LEXERS):
+ for modname, name, _, filenames, _ in LEXERS.values():
for filename in filenames:
if _fn_matches(fn, filename):
if name not in _lexer_cache:
@@ -175,7 +175,7 @@ def find_lexer_class_for_filename(_fn, code=None):
if _fn_matches(fn, filename):
matches.append((cls, filename))
- if sys.version_info > (3,) and isinstance(code, bytes):
+ if isinstance(code, bytes):
# decode it, since all analyse_text functions expect unicode
code = guess_decode(code)
@@ -216,7 +216,7 @@ def get_lexer_for_mimetype(_mime, **options):
Raises ClassNotFound if not found.
"""
- for modname, name, _, _, mimetypes in itervalues(LEXERS):
+ for modname, name, _, _, mimetypes in LEXERS.values():
if _mime in mimetypes:
if name not in _lexer_cache:
_load_lexers(modname)
@@ -293,7 +293,7 @@ def guess_lexer_for_filename(_fn, _text, **options):
def guess_lexer(_text, **options):
"""Guess a lexer by strong distinctions in the text (eg, shebang)."""
- if not isinstance(_text, text_type):
+ if not isinstance(_text, str):
inencoding = options.get('inencoding', options.get('encoding'))
if inencoding:
_text = _text.decode(inencoding or 'utf8')
diff --git a/pygments/lexers/_lua_builtins.py b/pygments/lexers/_lua_builtins.py
index ca3acb1c..065f674b 100644
--- a/pygments/lexers/_lua_builtins.py
+++ b/pygments/lexers/_lua_builtins.py
@@ -288,7 +288,7 @@ if __name__ == '__main__': # pragma: no cover
print('>> %s' % full_function_name)
m = get_function_module(full_function_name)
modules.setdefault(m, []).append(full_function_name)
- modules = {k: tuple(v) for k, v in modules.iteritems()}
+ modules = {k: tuple(v) for k, v in modules.items()}
regenerate(__file__, modules)
diff --git a/pygments/lexers/_scilab_builtins.py b/pygments/lexers/_scilab_builtins.py
index b26a0085..4b6886f9 100644
--- a/pygments/lexers/_scilab_builtins.py
+++ b/pygments/lexers/_scilab_builtins.py
@@ -3089,6 +3089,6 @@ mclose(fd)\n''' % var_type)
with open(__file__, 'w') as f:
f.write(header)
f.write('# Autogenerated\n\n')
- for k, v in sorted(new_data.iteritems()):
+ for k, v in sorted(new_data.items()):
f.write(format_lines(k + '_kw', v) + '\n\n')
f.write(footer)
diff --git a/pygments/lexers/css.py b/pygments/lexers/css.py
index 4c77efe0..6209be0c 100644
--- a/pygments/lexers/css.py
+++ b/pygments/lexers/css.py
@@ -16,7 +16,6 @@ from pygments.lexer import ExtendedRegexLexer, RegexLexer, include, bygroups, \
default, words, inherit
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
-from pygments.util import iteritems
__all__ = ['CssLexer', 'SassLexer', 'ScssLexer', 'LessCssLexer']
@@ -612,7 +611,7 @@ class SassLexer(ExtendedRegexLexer):
(r"\*/", Comment, '#pop'),
],
}
- for group, common in iteritems(common_sass_tokens):
+ for group, common in common_sass_tokens.items():
tokens[group] = copy.copy(common)
tokens['value'].append((r'\n', Text, 'root'))
tokens['selector'].append((r'\n', Text, 'root'))
@@ -662,7 +661,7 @@ class ScssLexer(RegexLexer):
(r"\*/", Comment, '#pop'),
],
}
- for group, common in iteritems(common_sass_tokens):
+ for group, common in common_sass_tokens.items():
tokens[group] = copy.copy(common)
tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')])
tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')])
diff --git a/pygments/lexers/dotnet.py b/pygments/lexers/dotnet.py
index 458a9eb4..83946054 100644
--- a/pygments/lexers/dotnet.py
+++ b/pygments/lexers/dotnet.py
@@ -14,7 +14,7 @@ from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
using, this, default, words
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other
-from pygments.util import get_choice_opt, iteritems
+from pygments.util import get_choice_opt
from pygments import unistring as uni
from pygments.lexers.html import XmlLexer
@@ -71,7 +71,7 @@ class CSharpLexer(RegexLexer):
tokens = {}
token_variants = True
- for levelname, cs_ident in iteritems(levels):
+ for levelname, cs_ident in levels.items():
tokens[levelname] = {
'root': [
# method names
@@ -184,7 +184,7 @@ class NemerleLexer(RegexLexer):
tokens = {}
token_variants = True
- for levelname, cs_ident in iteritems(levels):
+ for levelname, cs_ident in levels.items():
tokens[levelname] = {
'root': [
# method names
diff --git a/pygments/lexers/javascript.py b/pygments/lexers/javascript.py
index 665fc28e..9da36c43 100644
--- a/pygments/lexers/javascript.py
+++ b/pygments/lexers/javascript.py
@@ -15,7 +15,7 @@ from pygments.lexer import RegexLexer, include, bygroups, default, using, \
this, words, combined
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Other
-from pygments.util import get_bool_opt, iteritems
+from pygments.util import get_bool_opt
import pygments.unistring as uni
__all__ = ['JavascriptLexer', 'KalLexer', 'LiveScriptLexer', 'DartLexer',
@@ -767,9 +767,9 @@ class LassoLexer(RegexLexer):
self._members = set()
if self.builtinshighlighting:
from pygments.lexers._lasso_builtins import BUILTINS, MEMBERS
- for key, value in iteritems(BUILTINS):
+ for key, value in BUILTINS.items():
self._builtins.update(value)
- for key, value in iteritems(MEMBERS):
+ for key, value in MEMBERS.items():
self._members.update(value)
RegexLexer.__init__(self, **options)
diff --git a/pygments/lexers/php.py b/pygments/lexers/php.py
index 680f0573..4f06d216 100644
--- a/pygments/lexers/php.py
+++ b/pygments/lexers/php.py
@@ -15,8 +15,7 @@ from pygments.lexer import RegexLexer, include, bygroups, default, using, \
this, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Other
-from pygments.util import get_bool_opt, get_list_opt, iteritems, \
- shebang_matches
+from pygments.util import get_bool_opt, get_list_opt, shebang_matches
__all__ = ['ZephirLexer', 'PhpLexer']
@@ -245,7 +244,7 @@ class PhpLexer(RegexLexer):
self._functions = set()
if self.funcnamehighlighting:
from pygments.lexers._php_builtins import MODULES
- for key, value in iteritems(MODULES):
+ for key, value in MODULES.items():
if key not in self.disabledmodules:
self._functions.update(value)
RegexLexer.__init__(self, **options)
diff --git a/pygments/lexers/robotframework.py b/pygments/lexers/robotframework.py
index 642c90c5..284fdcf5 100644
--- a/pygments/lexers/robotframework.py
+++ b/pygments/lexers/robotframework.py
@@ -27,7 +27,6 @@ import re
from pygments.lexer import Lexer
from pygments.token import Token
-from pygments.util import text_type
__all__ = ['RobotFrameworkLexer']
@@ -80,7 +79,7 @@ class RobotFrameworkLexer(Lexer):
for value, token in row_tokenizer.tokenize(row):
for value, token in var_tokenizer.tokenize(value, token):
if value:
- yield index, token, text_type(value)
+ yield index, token, str(value)
index += len(value)
diff --git a/pygments/lexers/scripting.py b/pygments/lexers/scripting.py
index a340f8e0..8ce425fc 100644
--- a/pygments/lexers/scripting.py
+++ b/pygments/lexers/scripting.py
@@ -15,7 +15,7 @@ from pygments.lexer import RegexLexer, include, bygroups, default, combined, \
words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Whitespace, Other
-from pygments.util import get_bool_opt, get_list_opt, iteritems
+from pygments.util import get_bool_opt, get_list_opt
__all__ = ['LuaLexer', 'MoonScriptLexer', 'ChaiscriptLexer', 'LSLLexer',
'AppleScriptLexer', 'RexxLexer', 'MOOCodeLexer', 'HybrisLexer',
@@ -142,7 +142,7 @@ class LuaLexer(RegexLexer):
self._functions = set()
if self.func_name_highlighting:
from pygments.lexers._lua_builtins import MODULES
- for mod, func in iteritems(MODULES):
+ for mod, func in MODULES.items():
if mod not in self.disabled_modules:
self._functions.update(func)
RegexLexer.__init__(self, **options)
diff --git a/pygments/lexers/special.py b/pygments/lexers/special.py
index 4016c594..e97f1944 100644
--- a/pygments/lexers/special.py
+++ b/pygments/lexers/special.py
@@ -10,10 +10,11 @@
"""
import re
+from io import BytesIO
from pygments.lexer import Lexer
from pygments.token import Token, Error, Text
-from pygments.util import get_choice_opt, text_type, BytesIO
+from pygments.util import get_choice_opt
__all__ = ['TextLexer', 'RawTokenLexer']
@@ -64,7 +65,7 @@ class RawTokenLexer(Lexer):
Lexer.__init__(self, **options)
def get_tokens(self, text):
- if isinstance(text, text_type):
+ if isinstance(text, str):
# raw token stream never has any non-ASCII characters
text = text.encode('ascii')
if self.compress == 'gz':
diff --git a/pygments/lexers/sql.py b/pygments/lexers/sql.py
index afcaa6d4..1a53b0f6 100644
--- a/pygments/lexers/sql.py
+++ b/pygments/lexers/sql.py
@@ -44,7 +44,6 @@ from pygments.lexer import Lexer, RegexLexer, do_insertions, bygroups, words
from pygments.token import Punctuation, Whitespace, Text, Comment, Operator, \
Keyword, Name, String, Number, Generic
from pygments.lexers import get_lexer_by_name, ClassNotFound
-from pygments.util import iteritems
from pygments.lexers._postgres_builtins import KEYWORDS, DATATYPES, \
PSEUDO_TYPES, PLPGSQL_KEYWORDS
@@ -212,7 +211,7 @@ class PlPgsqlLexer(PostgresBase, RegexLexer):
mimetypes = ['text/x-plpgsql']
flags = re.IGNORECASE
- tokens = {k: l[:] for (k, l) in iteritems(PostgresLexer.tokens)}
+ tokens = {k: l[:] for (k, l) in PostgresLexer.tokens.items()}
# extend the keywords list
for i, pattern in enumerate(tokens['root']):
@@ -246,7 +245,7 @@ class PsqlRegexLexer(PostgresBase, RegexLexer):
aliases = [] # not public
flags = re.IGNORECASE
- tokens = {k: l[:] for (k, l) in iteritems(PostgresLexer.tokens)}
+ tokens = {k: l[:] for (k, l) in PostgresLexer.tokens.items()}
tokens['root'].append(
(r'\\[^\s]+', Keyword.Pseudo, 'psql-command'))
diff --git a/pygments/style.py b/pygments/style.py
index 73264577..8f50feb1 100644
--- a/pygments/style.py
+++ b/pygments/style.py
@@ -10,7 +10,6 @@
"""
from pygments.token import Token, STANDARD_TYPES
-from pygments.util import add_metaclass
# Default mapping of ansixxx to RGB colors.
_ansimap = {
@@ -169,8 +168,7 @@ class StyleMeta(type):
return len(cls._styles)
-@add_metaclass(StyleMeta)
-class Style(object):
+class Style(metaclass=StyleMeta):
#: overall background color (``None`` means transparent)
background_color = '#ffffff'
diff --git a/pygments/unistring.py b/pygments/unistring.py
index dd011cf0..466ebdb6 100644
--- a/pygments/unistring.py
+++ b/pygments/unistring.py
@@ -156,10 +156,6 @@ def _handle_runs(char_list): # pragma: no cover
if __name__ == '__main__': # pragma: no cover
import unicodedata
- # we need Py3 for the determination of the XID_* properties
- if sys.version_info[:2] < (3, 3):
- raise RuntimeError('this file must be regenerated with Python 3.3+')
-
categories_bmp = {'xid_start': [], 'xid_continue': []}
categories_nonbmp = {'xid_start': [], 'xid_continue': []}
diff --git a/pygments/util.py b/pygments/util.py
index 054b705e..fb81d464 100644
--- a/pygments/util.py
+++ b/pygments/util.py
@@ -11,6 +11,7 @@
import re
import sys
+from io import TextIOWrapper
split_path_re = re.compile(r'[/\\ ]')
@@ -53,7 +54,7 @@ def get_bool_opt(options, optname, default=None):
return string
elif isinstance(string, int):
return bool(string)
- elif not isinstance(string, string_types):
+ elif not isinstance(string, str):
raise OptionError('Invalid type %r for option %s; use '
'1/0, yes/no, true/false, on/off' % (
string, optname))
@@ -83,7 +84,7 @@ def get_int_opt(options, optname, default=None):
def get_list_opt(options, optname, default=None):
val = options.get(optname, default)
- if isinstance(val, string_types):
+ if isinstance(val, str):
return val.split()
elif isinstance(val, (list, tuple)):
return list(val)
@@ -224,7 +225,7 @@ def unirange(a, b):
if sys.maxunicode > 0xffff:
# wide build
- return u'[%s-%s]' % (unichr(a), unichr(b))
+ return u'[%s-%s]' % (chr(a), chr(b))
else:
# narrow build stores surrogates, and the 're' module handles them
# (incorrectly) as characters. Since there is still ordering among
@@ -232,24 +233,23 @@ def unirange(a, b):
# background in http://bugs.python.org/issue3665 and
# http://bugs.python.org/issue12749
#
- # Additionally, the lower constants are using unichr rather than
+ # Additionally, the lower constants are using chr rather than
# literals because jython [which uses the wide path] can't load this
# file if they are literals.
ah, al = _surrogatepair(a)
bh, bl = _surrogatepair(b)
if ah == bh:
- return u'(?:%s[%s-%s])' % (unichr(ah), unichr(al), unichr(bl))
+ return u'(?:%s[%s-%s])' % (chr(ah), chr(al), chr(bl))
else:
buf = []
- buf.append(u'%s[%s-%s]' %
- (unichr(ah), unichr(al),
- ah == bh and unichr(bl) or unichr(0xdfff)))
+ buf.append(u'%s[%s-%s]' % (chr(ah), chr(al),
+ ah == bh and chr(bl) or chr(0xdfff)))
if bh - ah > 1:
buf.append(u'[%s-%s][%s-%s]' %
- unichr(ah+1), unichr(bh-1), unichr(0xdc00), unichr(0xdfff))
+ (chr(ah+1), chr(bh-1), chr(0xdc00), chr(0xdfff)))
if ah != bh:
buf.append(u'%s[%s-%s]' %
- (unichr(bh), unichr(0xdc00), unichr(bl)))
+ (chr(bh), chr(0xdc00), chr(bl)))
return u'(?:' + u'|'.join(buf) + u')'
@@ -345,44 +345,7 @@ def terminal_encoding(term):
return locale.getpreferredencoding()
-# Python 2/3 compatibility
-
-if sys.version_info < (3, 0):
- unichr = unichr
- xrange = xrange
- string_types = (str, unicode)
- text_type = unicode
- u_prefix = 'u'
- iteritems = dict.iteritems
- itervalues = dict.itervalues
- import StringIO
- import cStringIO
- # unfortunately, io.StringIO in Python 2 doesn't accept str at all
- StringIO = StringIO.StringIO
- BytesIO = cStringIO.StringIO
-else:
- unichr = chr
- xrange = range
- string_types = (str,)
- text_type = str
- u_prefix = ''
- iteritems = dict.items
- itervalues = dict.values
- from io import StringIO, BytesIO, TextIOWrapper
-
- class UnclosingTextIOWrapper(TextIOWrapper):
- # Don't close underlying buffer on destruction.
- def close(self):
- self.flush()
-
-
-def add_metaclass(metaclass):
- """Class decorator for creating a class with a metaclass."""
- def wrapper(cls):
- orig_vars = cls.__dict__.copy()
- orig_vars.pop('__dict__', None)
- orig_vars.pop('__weakref__', None)
- for slots_var in orig_vars.get('__slots__', ()):
- orig_vars.pop(slots_var)
- return metaclass(cls.__name__, cls.__bases__, orig_vars)
- return wrapper
+class UnclosingTextIOWrapper(TextIOWrapper):
+ # Don't close underlying buffer on destruction.
+ def close(self):
+ self.flush()
diff --git a/setup.py b/setup.py
index 0c9f4f54..62db3a56 100755
--- a/setup.py
+++ b/setup.py
@@ -42,7 +42,7 @@ setup(
platforms = 'any',
zip_safe = False,
include_package_data = True,
- python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
+ python_requires='>=3.5',
classifiers = [
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
@@ -50,8 +50,6 @@ setup(
'Intended Audience :: System Administrators',
'Development Status :: 6 - Mature',
'Programming Language :: Python',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
diff --git a/tests/test_basic_api.py b/tests/test_basic_api.py
index 7d08c05e..583122d2 100644
--- a/tests/test_basic_api.py
+++ b/tests/test_basic_api.py
@@ -10,6 +10,7 @@
from __future__ import print_function
import random
+from io import StringIO, BytesIO
from os import path
import pytest
@@ -18,12 +19,12 @@ from pygments import lexers, formatters, lex, format
from pygments.token import _TokenType, Text
from pygments.lexer import RegexLexer
from pygments.formatters.img import FontNotFound
-from pygments.util import text_type, StringIO, BytesIO, xrange, ClassNotFound
+from pygments.util import ClassNotFound
TESTDIR = path.dirname(path.abspath(__file__))
TESTFILE = path.join(TESTDIR, 'test_basic_api.py')
-test_content = [chr(i) for i in xrange(33, 128)] * 5
+test_content = [chr(i) for i in range(33, 128)] * 5
random.shuffle(test_content)
test_content = ''.join(test_content) + '\n'
@@ -75,7 +76,7 @@ def test_random_input(cls):
for token in tokens:
assert isinstance(token, tuple)
assert isinstance(token[0], _TokenType)
- assert isinstance(token[1], text_type)
+ assert isinstance(token[1], str)
txt += token[1]
assert txt == test_content, "%s lexer roundtrip failed: %r != %r" % \
(cls.name, test_content, txt)
@@ -176,7 +177,7 @@ def test_formatter_encodings():
fmt = HtmlFormatter()
tokens = [(Text, u"ä")]
out = format(tokens, fmt)
- assert type(out) is text_type
+ assert type(out) is str
assert u"ä" in out
# encoding option
@@ -206,7 +207,7 @@ def test_formatter_unicode_handling(cls):
if cls.name != 'Raw tokens':
out = format(tokens, inst)
if cls.unicodeoutput:
- assert type(out) is text_type, '%s: %r' % (cls, out)
+ assert type(out) is str, '%s: %r' % (cls, out)
inst = cls(encoding='utf-8')
out = format(tokens, inst)
@@ -272,7 +273,7 @@ class TestFilters(object):
with open(TESTFILE, 'rb') as fp:
text = fp.read().decode('utf-8')
tokens = list(lx.get_tokens(text))
- assert all(isinstance(t[1], text_type) for t in tokens), \
+ assert all(isinstance(t[1], str) for t in tokens), \
'%s filter did not return Unicode' % x
roundtext = ''.join([t[1] for t in tokens])
if x not in ('whitespace', 'keywordcase', 'gobble'):
diff --git a/tests/test_cmdline.py b/tests/test_cmdline.py
index d56e2ae5..8392dcf6 100644
--- a/tests/test_cmdline.py
+++ b/tests/test_cmdline.py
@@ -14,12 +14,12 @@ import os
import re
import sys
import tempfile
+from io import BytesIO
from os import path
from pytest import raises
from pygments import cmdline, highlight
-from pygments.util import BytesIO, StringIO
TESTDIR = path.dirname(path.abspath(__file__))
TESTFILE = path.join(TESTDIR, 'test_cmdline.py')
@@ -41,17 +41,12 @@ def run_cmdline(*args, **kwds):
saved_stdin = sys.stdin
saved_stdout = sys.stdout
saved_stderr = sys.stderr
- if sys.version_info > (3,):
- stdin_buffer = BytesIO()
- stdout_buffer = BytesIO()
- stderr_buffer = BytesIO()
- new_stdin = sys.stdin = io.TextIOWrapper(stdin_buffer, 'utf-8')
- new_stdout = sys.stdout = io.TextIOWrapper(stdout_buffer, 'utf-8')
- new_stderr = sys.stderr = io.TextIOWrapper(stderr_buffer, 'utf-8')
- else:
- stdin_buffer = new_stdin = sys.stdin = StringIO()
- stdout_buffer = new_stdout = sys.stdout = StringIO()
- stderr_buffer = new_stderr = sys.stderr = StringIO()
+ stdin_buffer = BytesIO()
+ stdout_buffer = BytesIO()
+ stderr_buffer = BytesIO()
+ new_stdin = sys.stdin = io.TextIOWrapper(stdin_buffer, 'utf-8')
+ new_stdout = sys.stdout = io.TextIOWrapper(stdout_buffer, 'utf-8')
+ new_stderr = sys.stderr = io.TextIOWrapper(stderr_buffer, 'utf-8')
new_stdin.write(kwds.get('stdin', ''))
new_stdin.seek(0, 0)
try:
diff --git a/tests/test_html_formatter.py b/tests/test_html_formatter.py
index 95247398..3001a722 100644
--- a/tests/test_html_formatter.py
+++ b/tests/test_html_formatter.py
@@ -14,10 +14,10 @@ import os
import re
import tempfile
from os import path
+from io import StringIO
from pytest import raises
-from pygments.util import StringIO
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter, NullFormatter
from pygments.formatters.html import escape_html
diff --git a/tests/test_irc_formatter.py b/tests/test_irc_formatter.py
index 046a0d19..af950730 100644
--- a/tests/test_irc_formatter.py
+++ b/tests/test_irc_formatter.py
@@ -9,7 +9,8 @@
from __future__ import print_function
-from pygments.util import StringIO
+from io import StringIO
+
from pygments.lexers import PythonLexer
from pygments.formatters import IRCFormatter
diff --git a/tests/test_rtf_formatter.py b/tests/test_rtf_formatter.py
index 35179df4..23d6695f 100644
--- a/tests/test_rtf_formatter.py
+++ b/tests/test_rtf_formatter.py
@@ -7,7 +7,8 @@
:license: BSD, see LICENSE for details.
"""
-from pygments.util import StringIO
+from io import StringIO
+
from pygments.formatters import RtfFormatter
from pygments.lexers.special import TextLexer
diff --git a/tests/test_terminal_formatter.py b/tests/test_terminal_formatter.py
index 98eab581..0367f941 100644
--- a/tests/test_terminal_formatter.py
+++ b/tests/test_terminal_formatter.py
@@ -10,8 +10,8 @@
from __future__ import print_function
import re
+from io import StringIO
-from pygments.util import StringIO
from pygments.lexers.sql import PlPgsqlLexer
from pygments.formatters import TerminalFormatter, Terminal256Formatter, \
HtmlFormatter, LatexFormatter
diff --git a/tests/test_unistring.py b/tests/test_unistring.py
index a4b58827..3a0acec9 100644
--- a/tests/test_unistring.py
+++ b/tests/test_unistring.py
@@ -11,7 +11,6 @@ import re
import random
from pygments import unistring as uni
-from pygments.util import unichr
def test_cats_exist_and_compilable():
@@ -39,7 +38,7 @@ def test_spot_check_types():
random.seed(0)
for i in range(1000):
o = random.randint(0, 65535)
- c = unichr(o)
+ c = chr(o)
if o > 0xd800 and o <= 0xdfff and not uni.Cs:
continue # Bah, Jython.
print(hex(o))
diff --git a/tests/test_util.py b/tests/test_util.py
index aa7b7acb..d5706407 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -193,17 +193,6 @@ def test_guess_decode_from_terminal():
assert s == (u'\xff', 'utf-8')
-def test_add_metaclass():
- class Meta(type):
- pass
-
- @util.add_metaclass(Meta)
- class Cls:
- pass
-
- assert type(Cls) is Meta
-
-
def test_console_ansiformat():
f = console.ansiformat
c = console.codes