summaryrefslogtreecommitdiff
path: root/pygments/lexer.py
diff options
context:
space:
mode:
author    Georg Brandl <georg@python.org>  2014-09-16 10:32:39 +0200
committer Georg Brandl <georg@python.org>  2014-09-16 10:32:39 +0200
commit    a54efd963ebd1a74eff5f1d6f7efcdd8895980e3 (patch)
tree      2558a18f32a758e548fdb28e07db39f5c6ebb362 /pygments/lexer.py
parent    f023ece2fab081c16a763ca43b513552fc87f207 (diff)
download  pygments-a54efd963ebd1a74eff5f1d6f7efcdd8895980e3.tar.gz
pygments.lexer: small PEP8 overhaul.
Diffstat (limited to 'pygments/lexer.py')
-rw-r--r--  pygments/lexer.py | 19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/pygments/lexer.py b/pygments/lexer.py
index 0ede7927..5214d43e 100644
--- a/pygments/lexer.py
+++ b/pygments/lexer.py
@@ -8,17 +8,20 @@
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-import re, itertools
+
+import re
+import itertools
from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
- make_analysator, text_type, add_metaclass, iteritems
+ make_analysator, text_type, add_metaclass, iteritems
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
- 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this', 'default']
+ 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
+ 'default']
_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
@@ -160,7 +163,7 @@ class Lexer(object):
break
# no BOM found, so use chardet
if decoded is None:
- enc = chardet.detect(text[:1024]) # Guess using first 1KB
+ enc = chardet.detect(text[:1024]) # Guess using first 1KB
decoded = text.decode(enc.get('encoding') or 'utf-8',
'replace')
text = decoded
@@ -237,7 +240,7 @@ class DelegatingLexer(Lexer):
self.root_lexer.get_tokens_unprocessed(buffered))
-#-------------------------------------------------------------------------------
+# ------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#
@@ -406,7 +409,7 @@ class RegexLexerMeta(LexerMeta):
def _process_token(cls, token):
"""Preprocess the token component of a token definition."""
assert type(token) is _TokenType or callable(token), \
- 'token type must be simple type or callable, not %r' % (token,)
+ 'token type must be simple type or callable, not %r' % (token,)
return token
def _process_new_state(cls, new_state, unprocessed, processed):
@@ -439,7 +442,7 @@ class RegexLexerMeta(LexerMeta):
for istate in new_state:
assert (istate in unprocessed or
istate in ('#pop', '#push')), \
- 'unknown new state ' + istate
+ 'unknown new state ' + istate
return new_state
else:
assert False, 'unknown new state def %r' % new_state
@@ -645,7 +648,7 @@ class LexerContext(object):
def __init__(self, text, pos, stack=None, end=None):
self.text = text
self.pos = pos
- self.end = end or len(text) # end=0 not supported ;-)
+ self.end = end or len(text) # end=0 not supported ;-)
self.stack = stack or ['root']
def __repr__(self):