Diffstat (limited to 'pygments/lexers/special.py')
-rw-r--r-- | pygments/lexers/special.py | 15
1 file changed, 7 insertions, 8 deletions
diff --git a/pygments/lexers/special.py b/pygments/lexers/special.py
index 9b3cd508..9ea2e22c 100644
--- a/pygments/lexers/special.py
+++ b/pygments/lexers/special.py
@@ -5,16 +5,15 @@

     Special lexers.

-    :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
+    :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
 """

 import re
-import cStringIO

 from pygments.lexer import Lexer
 from pygments.token import Token, Error, Text
-from pygments.util import get_choice_opt, b
+from pygments.util import get_choice_opt, text_type, BytesIO

 __all__ = ['TextLexer', 'RawTokenLexer']

@@ -35,7 +34,7 @@ class TextLexer(Lexer):
 _ttype_cache = {}

-line_re = re.compile(b('.*?\n'))
+line_re = re.compile(b'.*?\n')


 class RawTokenLexer(Lexer):
     """
@@ -60,12 +59,12 @@ class RawTokenLexer(Lexer):
         Lexer.__init__(self, **options)

     def get_tokens(self, text):
-        if isinstance(text, unicode):
+        if isinstance(text, text_type):
             # raw token stream never has any non-ASCII characters
             text = text.encode('ascii')
         if self.compress == 'gz':
             import gzip
-            gzipfile = gzip.GzipFile('', 'rb', 9, cStringIO.StringIO(text))
+            gzipfile = gzip.GzipFile('', 'rb', 9, BytesIO(text))
             text = gzipfile.read()
         elif self.compress == 'bz2':
             import bz2
@@ -73,7 +72,7 @@ class RawTokenLexer(Lexer):

         # do not call Lexer.get_tokens() because we do not want Unicode
         # decoding to occur, and stripping is not optional.
-        text = text.strip(b('\n')) + b('\n')
+        text = text.strip(b'\n') + b'\n'
         for i, t, v in self.get_tokens_unprocessed(text):
             yield t, v

@@ -81,7 +80,7 @@ class RawTokenLexer(Lexer):
         length = 0
         for match in line_re.finditer(text):
             try:
-                ttypestr, val = match.group().split(b('\t'), 1)
+                ttypestr, val = match.group().split(b'\t', 1)
             except ValueError:
                 val = match.group().decode(self.encoding)
                 ttype = Error
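The patch replaces Python 2-only constructs with 2/3-compatible ones: the cStringIO module and the b() byte-literal helper give way to io.BytesIO and plain bytes literals, with text_type and BytesIO re-exported by pygments.util. Below is a minimal, self-contained sketch (not part of the patch) of the byte-level path the new code relies on; the one-line raw token stream is made-up sample data, and only standard-library modules are used:

import gzip
import re
from io import BytesIO

# Bytes literal instead of the old b('...') helper; valid on 2.6+ and 3.x.
line_re = re.compile(b'.*?\n')

# A tiny sample raw token stream: "<token type>\t<value repr>\n" per line.
raw = b"Token.Text\t'hello world\\n'\n"

# Compress it into an in-memory binary buffer ...
buf = BytesIO()
with gzip.GzipFile(fileobj=buf, mode='wb') as f:
    f.write(raw)

# ... and decompress it the way the patched get_tokens() does,
# handing BytesIO (rather than cStringIO.StringIO) to gzip.GzipFile.
text = gzip.GzipFile('', 'rb', 9, BytesIO(buf.getvalue())).read()
text = text.strip(b'\n') + b'\n'

for match in line_re.finditer(text):
    ttypestr, val = match.group().split(b'\t', 1)  # b'\t', not b('\t')
    print(ttypestr, val)

BytesIO is the natural replacement here because cStringIO does not exist on Python 3 and gzip.GzipFile expects a binary file object on both major versions.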