diff options
author | Georg Brandl <georg@python.org> | 2014-01-18 16:44:49 +0100 |
---|---|---|
committer | Georg Brandl <georg@python.org> | 2014-01-18 16:44:49 +0100 |
commit | 97703d63f39e6086d497a6a749c9eee3293dcbeb (patch) | |
tree | c970bf2a7bc17aa7053f3621e299a01fb9695342 /pygments/lexers/special.py | |
parent | 5500fd3a6d0c5ece01826606fcf2d684407b9cc6 (diff) | |
download | pygments-97703d63f39e6086d497a6a749c9eee3293dcbeb.tar.gz |
Finalize single-source port for Py2.[67] and Py3.3+.
Diffstat (limited to 'pygments/lexers/special.py')
-rw-r--r-- | pygments/lexers/special.py | 13 |
1 file changed, 6 insertions, 7 deletions
diff --git a/pygments/lexers/special.py b/pygments/lexers/special.py index d7fe6b53..9ea2e22c 100644 --- a/pygments/lexers/special.py +++ b/pygments/lexers/special.py @@ -10,11 +10,10 @@ """ import re -import cStringIO from pygments.lexer import Lexer from pygments.token import Token, Error, Text -from pygments.util import get_choice_opt, b +from pygments.util import get_choice_opt, text_type, BytesIO __all__ = ['TextLexer', 'RawTokenLexer'] @@ -35,7 +34,7 @@ class TextLexer(Lexer): _ttype_cache = {} -line_re = re.compile(b('.*?\n')) +line_re = re.compile(b'.*?\n') class RawTokenLexer(Lexer): """ @@ -60,12 +59,12 @@ class RawTokenLexer(Lexer): Lexer.__init__(self, **options) def get_tokens(self, text): - if isinstance(text, unicode): + if isinstance(text, text_type): # raw token stream never has any non-ASCII characters text = text.encode('ascii') if self.compress == 'gz': import gzip - gzipfile = gzip.GzipFile('', 'rb', 9, cStringIO.StringIO(text)) + gzipfile = gzip.GzipFile('', 'rb', 9, BytesIO(text)) text = gzipfile.read() elif self.compress == 'bz2': import bz2 @@ -73,7 +72,7 @@ class RawTokenLexer(Lexer): # do not call Lexer.get_tokens() because we do not want Unicode # decoding to occur, and stripping is not optional. - text = text.strip(b('\n')) + b('\n') + text = text.strip(b'\n') + b'\n' for i, t, v in self.get_tokens_unprocessed(text): yield t, v @@ -81,7 +80,7 @@ class RawTokenLexer(Lexer): length = 0 for match in line_re.finditer(text): try: - ttypestr, val = match.group().split(b('\t'), 1) + ttypestr, val = match.group().split(b'\t', 1) except ValueError: val = match.group().decode(self.encoding) ttype = Error |