author     David Beazley <dave@dabeaz.com>  2015-04-21 14:03:36 -0500
committer  David Beazley <dave@dabeaz.com>  2015-04-21 14:03:36 -0500
commit     aa8f06c083194f64f2aaf68382675062adbdf196 (patch)
tree       bfb8f36ec604d39e81b41848ebd3b4a6eb0f0871
parent     117e0aa305c696e1a102c222dcfc197c4f59b8ce (diff)
download   ply-aa8f06c083194f64f2aaf68382675062adbdf196.tar.gz
Code modernization related to range() and enumerate().
-rw-r--r--  ply/lex.py   56
-rw-r--r--  ply/yacc.py  36
2 files changed, 45 insertions, 47 deletions
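
The commit message names the whole theme: index-driven "for i in range(len(seq))" loops are replaced with direct iteration via enumerate() (and, in one spot, zip()), and dicts used as makeshift sets become real sets. As a minimal standalone sketch of the enumerate() pattern (toy data, not code from this repository):

    items = ['a', 'b', 'c']

    # Old style: index arithmetic just to reach each element.
    for i in range(len(items)):
        print(i, items[i])

    # Modernized: enumerate() yields (index, element) pairs directly.
    for i, item in enumerate(items):
        print(i, item)

    # enumerate() also accepts a start value; the _statetoken() hunk below
    # uses enumerate(parts[1:], 1) so the loop index still matches positions
    # in the original list even though the first element is skipped.
    parts = 't_foo_NUMBER'.split('_')
    for i, part in enumerate(parts[1:], 1):
        print(i, part)
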
diff --git a/ply/lex.py b/ply/lex.py
index d136c52..7a7d18e 100644
--- a/ply/lex.py
+++ b/ply/lex.py
@@ -184,32 +184,25 @@ class Lexer:
             tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
             tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
+            # Rewrite the lexstatere table, replacing function objects with function names
             tabre = {}
-            # Collect all functions in the initial state
-            initial = self.lexstatere['INITIAL']
-            initialfuncs = []
-            for part in initial:
-                for f in part[1]:
-                    if f and f[0]:
-                        initialfuncs.append(f)
-
-            for key, lre in self.lexstatere.items():
+            for statename, lre in self.lexstatere.items():
                 titem = []
-                for i in range(len(lre)):
-                    titem.append((self.lexstateretext[key][i], _funcs_to_names(lre[i][1], self.lexstaterenames[key][i])))
-                tabre[key] = titem
+                for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
+                    titem.append((retext, _funcs_to_names(func, renames)))
+                tabre[statename] = titem
             tf.write('_lexstatere = %s\n' % repr(tabre))
             tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))
             taberr = {}
-            for key, ef in self.lexstateerrorf.items():
-                taberr[key] = ef.__name__ if ef else None
+            for statename, ef in self.lexstateerrorf.items():
+                taberr[statename] = ef.__name__ if ef else None
             tf.write('_lexstateerrorf = %s\n' % repr(taberr))
             tabeof = {}
-            for key, ef in self.lexstateeoff.items():
-                tabeof[key] = ef.__name__ if ef else None
+            for statename, ef in self.lexstateeoff.items():
+                tabeof[statename] = ef.__name__ if ef else None
             tf.write('_lexstateeoff = %s\n' % repr(tabeof))
     # ------------------------------------------------------------
@@ -238,22 +231,22 @@ class Lexer:
         self.lexstateignore = lextab._lexstateignore
         self.lexstatere = {}
         self.lexstateretext = {}
-        for key, lre in lextab._lexstatere.items():
+        for statename, lre in lextab._lexstatere.items():
             titem = []
             txtitem = []
-            for i in range(len(lre)):
-                titem.append((re.compile(lre[i][0], lextab._lexreflags | re.VERBOSE), _names_to_funcs(lre[i][1], fdict)))
-                txtitem.append(lre[i][0])
-            self.lexstatere[key] = titem
-            self.lexstateretext[key] = txtitem
+            for pat, func_name in lre:
+                titem.append((re.compile(pat, lextab._lexreflags | re.VERBOSE), _names_to_funcs(func_name, fdict)))
+
+            self.lexstatere[statename] = titem
+            self.lexstateretext[statename] = txtitem
         self.lexstateerrorf = {}
-        for key, ef in lextab._lexstateerrorf.items():
-            self.lexstateerrorf[key] = fdict[ef]
+        for statename, ef in lextab._lexstateerrorf.items():
+            self.lexstateerrorf[statename] = fdict[ef]
         self.lexstateeoff = {}
-        for key, ef in lextab._lexstateeoff.items():
-            self.lexstateeoff[key] = fdict[ef]
+        for statename, ef in lextab._lexstateeoff.items():
+            self.lexstateeoff[statename] = fdict[ef]
         self.begin('INITIAL')
@@ -545,9 +538,10 @@ def _form_master_re(relist, reflags, ldict, toknames):
 def _statetoken(s, names):
     nonstate = 1
     parts = s.split('_')
-    for i in range(1, len(parts)):
-        if not parts[i] in names and parts[i] != 'ANY':
+    for i, part in enumerate(parts[1:], 1):
+        if part not in names and part != 'ANY':
             break
+
     if i > 1:
         states = tuple(parts[1:i])
     else:
@@ -573,7 +567,7 @@ class LexerReflect(object):
         self.tokens = []
         self.reflags = reflags
         self.stateinfo = {'INITIAL': 'inclusive'}
-        self.modules = {}
+        self.modules = set()
         self.error = False
         self.log = PlyLogger(sys.stderr) if log is None else log
@@ -745,7 +739,7 @@ class LexerReflect(object):
             line = f.__code__.co_firstlineno
             file = f.__code__.co_filename
             module = inspect.getmodule(f)
-            self.modules[module] = 1
+            self.modules.add(module)
             tokname = self.toknames[fname]
             if isinstance(f, types.MethodType):
@@ -814,7 +808,7 @@ class LexerReflect(object):
             line = f.__code__.co_firstlineno
             file = f.__code__.co_filename
             module = inspect.getmodule(f)
-            self.modules[module] = 1
+            self.modules.add(module)
             if isinstance(f, types.MethodType):
                 reqargs = 2
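
Two smaller idioms round out the lex.py changes above. In writetab(), indexed access into three parallel tables is replaced by a single zip(); note that the refactored readtab() loop no longer appends the pattern text to txtitem, so lexstateretext is left empty when a lexer is loaded from a table, and the diff alone does not say whether that is intentional. A rough sketch of the zip() idiom, with made-up list names rather than PLY's actual tables:

    patterns = ['pat1', 'pat2']
    retexts  = ['text1', 'text2']
    renames  = ['name1', 'name2']

    # Old style: one index drives three parallel lists.
    out = []
    for i in range(len(patterns)):
        out.append((retexts[i], renames[i]))

    # Modernized: zip() walks the lists in lockstep, no indexing.
    out = []
    for pat, retext, rename in zip(patterns, retexts, renames):
        out.append((retext, rename))

The same file also drops the old dict-as-set idiom (self.modules[module] = 1 plus iteration over .keys()) in favor of a real set, which says what it means and removes the dummy value:

    modules = set()
    modules.add('some_module')   # was: modules['some_module'] = 1
    for m in modules:            # was: for m in modules.keys()
        print(m)
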
diff --git a/ply/yacc.py b/ply/yacc.py
index 34c34aa..22864aa 100644
--- a/ply/yacc.py
+++ b/ply/yacc.py
@@ -437,7 +437,8 @@ class LRParser:
                     #--! DEBUG
                     if plen:
-                        debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']', -t)
+                        debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
+                                   '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']', -t)
                     else:
                         debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [], -t)
@@ -1494,7 +1495,8 @@ class Grammar(object):
                 try:
                     c = eval(s)
                     if (len(c) > 1):
-                        raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' % (file, line, s, prodname))
+                        raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
+                                           (file, line, s, prodname))
                     if c not in self.Terminals:
                         self.Terminals[c] = []
                     syms[n] = c
@@ -1509,7 +1511,8 @@ class Grammar(object):
             if syms[-1] == '%prec':
                 raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
             if syms[-2] != '%prec':
-                raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' % (file, line))
+                raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
+                                   (file, line))
             precname = syms[-1]
             prodprec = self.Precedence.get(precname)
             if not prodprec:
@@ -1823,8 +1826,7 @@ class Grammar(object):
             didadd = False
             for p in self.Productions[1:]:
                 # Here is the production set
-                for i in range(len(p.prod)):
-                    B = p.prod[i]
+                for i, B in enumerate(p.prod):
                     if B in self.Nonterminals:
                         # Okay. We got a non-terminal in a production
                         fst = self._first(p.prod[i+1:])
@@ -2214,14 +2216,13 @@ class LRGeneratedTable(LRTable):
     def find_nonterminal_transitions(self, C):
         trans = []
-        for state in range(len(C)):
-            for p in C[state]:
+        for stateno, state in enumerate(C):
+            for p in state:
                 if p.lr_index < p.len - 1:
-                    t = (state, p.prod[p.lr_index+1])
+                    t = (stateno, p.prod[p.lr_index+1])
                     if t[1] in self.grammar.Nonterminals:
                         if t not in trans:
                             trans.append(t)
-            state = state + 1
         return trans
     # -----------------------------------------------------------------------------
@@ -2549,7 +2550,8 @@ class LRGeneratedTable(LRTable):
                                 else:
                                     chosenp, rejectp = oldp, pp
                                 self.rr_conflicts.append((st, chosenp, rejectp))
-                                log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)', a, st_actionp[a].number, st_actionp[a])
+                                log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
+                                         a, st_actionp[a].number, st_actionp[a])
                             else:
                                 raise LALRError('Unknown conflict in state %d' % st)
                         else:
@@ -2860,7 +2862,7 @@ class ParserReflect(object):
         self.start = None
         self.error_func = None
         self.tokens = None
-        self.modules = {}
+        self.modules = set()
         self.grammar = []
         self.error = False
@@ -2927,7 +2929,7 @@ class ParserReflect(object):
         # Match def p_funcname(
         fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
-        for module in self.modules.keys():
+        for module in self.modules:
             lines, linen = inspect.getsourcelines(module)
             counthash = {}
@@ -2941,7 +2943,8 @@ class ParserReflect(object):
                         counthash[name] = linen
                     else:
                         filename = inspect.getsourcefile(module)
-                        self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d', filename, linen, name, prev)
+                        self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
+                                         filename, linen, name, prev)

     # Get the start symbol
     def get_start(self):
@@ -2972,7 +2975,7 @@ class ParserReflect(object):
             eline = self.error_func.__code__.co_firstlineno
             efile = self.error_func.__code__.co_filename
             module = inspect.getmodule(self.error_func)
-            self.modules[module] = 1
+            self.modules.add(module)
             argcount = self.error_func.__code__.co_argcount - ismethod
             if argcount != 1:
@@ -3086,7 +3089,8 @@ class ParserReflect(object):
                 self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
                 self.error = True
             elif not func.__doc__:
-                self.log.warning('%s:%d: No documentation string specified in function %r (ignored)', file, line, func.__name__)
+                self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
+                                 file, line, func.__name__)
             else:
                 try:
                     parsed_g = parse_grammar(doc, file, line)
@@ -3098,7 +3102,7 @@ class ParserReflect(object):
                     # Looks like a valid grammar rule
                     # Mark the file in which defined.
-                    self.modules[module] = 1
+                    self.modules.add(module)

         # Secondary validation step that looks for p_ definitions that are not functions
         # or functions that look like they might be grammar rules.
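
Finally, the find_nonterminal_transitions() hunk shows the third flavor of the cleanup: the old loop kept a manual state counter alongside the range() index and even incremented it redundantly at the bottom of the loop; enumerate() numbers the states as it yields them. A small sketch of the same shape over toy data rather than the real LR item sets:

    C = [['prod1', 'prod2'], ['prod3']]

    # Old style: range() index plus a redundant manual counter.
    trans = []
    for state in range(len(C)):
        for p in C[state]:
            trans.append((state, p))
        state = state + 1   # dead code: range() already advances state

    # Modernized: enumerate() pairs each item set with its state number.
    trans = []
    for stateno, state in enumerate(C):
        for p in state:
            trans.append((stateno, p))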