author    Thomas Robitaille <thomas.robitaille@gmail.com>  2018-02-09 11:18:01 +0000
committer Thomas Robitaille <thomas.robitaille@gmail.com>  2018-02-09 11:18:01 +0000
commit    3e3b45f814d0317a3d67aba71eec140b845b136b (patch)
tree      b13f5ffb3647769c90c418ca34b2860c20765aa2
parent    6860652be4069eaac8be88af1400f69a5197284f (diff)
download  ply-3e3b45f814d0317a3d67aba71eec140b845b136b.tar.gz
Remove trailing whitespace
-rw-r--r--  ply/cpp.py      59
-rw-r--r--  ply/ctokens.py  12
-rw-r--r--  ply/lex.py       5
-rw-r--r--  ply/ygen.py      5
4 files changed, 29 insertions, 52 deletions
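Note: the hunks below only delete trailing spaces, tabs, and blank lines, so many '-'/'+' pairs render identically once the invisible whitespace is stripped by the viewer. As a rough illustration, the cleanup amounts to something like the following sketch; the helper name and the in-place rewrite strategy are illustrative assumptions, not part of PLY:

    # Sketch of the whitespace cleanup this commit applies.
    # Assumption: the files are plain text; paths come from the diffstat above.
    import re

    def strip_trailing_whitespace(path):
        with open(path) as f:
            lines = [re.sub(r'[ \t]+$', '', line.rstrip('\n')) for line in f]
        # Trim the run of blank lines left at end-of-file.
        while lines and lines[-1] == '':
            lines.pop()
        with open(path, 'w') as f:
            if lines:
                f.write('\n'.join(lines) + '\n')

    for path in ('ply/cpp.py', 'ply/ctokens.py', 'ply/lex.py', 'ply/ygen.py'):
        strip_trailing_whitespace(path)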
diff --git a/ply/cpp.py b/ply/cpp.py
index f527655..2422916 100644
--- a/ply/cpp.py
+++ b/ply/cpp.py
@@ -5,7 +5,7 @@
# Copyright (C) 2007
# All rights reserved
#
-# This module implements an ANSI-C style lexical preprocessor for PLY.
+# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
from __future__ import generators
@@ -78,7 +78,7 @@ def t_CPP_COMMENT2(t):
# replace with '/n'
t.type = 'CPP_WS'; t.value = '\n'
return t
-
+
def t_error(t):
t.type = t.value[0]
t.value = t.value[0]
@@ -92,8 +92,8 @@ import os.path
# -----------------------------------------------------------------------------
# trigraph()
-#
-# Given an input string, this function replaces all trigraph sequences.
+#
+# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
@@ -263,7 +263,7 @@ class Preprocessor(object):
# ----------------------------------------------------------------------
# add_path()
#
- # Adds a search path to the preprocessor.
+ # Adds a search path to the preprocessor.
# ----------------------------------------------------------------------
def add_path(self,path):
@@ -307,7 +307,7 @@ class Preprocessor(object):
# ----------------------------------------------------------------------
# tokenstrip()
- #
+ #
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
@@ -333,7 +333,7 @@ class Preprocessor(object):
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
- # from each argument.
+ # from each argument.
#
# This function properly handles nested parenthesis and commas---these do not
# define new arguments.
@@ -345,7 +345,7 @@ class Preprocessor(object):
current_arg = []
nesting = 1
tokenlen = len(tokenlist)
-
+
# Search for the opening '('.
i = 0
while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
@@ -379,7 +379,7 @@ class Preprocessor(object):
else:
current_arg.append(t)
i += 1
-
+
# Missing end argument
self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
return 0, [],[]
@@ -391,9 +391,9 @@ class Preprocessor(object):
# This is used to speed up macro expansion later on---we'll know
# right away where to apply patches to the value to form the expansion
# ----------------------------------------------------------------------
-
+
def macro_prescan(self,macro):
- macro.patch = [] # Standard macro arguments
+ macro.patch = [] # Standard macro arguments
macro.str_patch = [] # String conversion expansion
macro.var_comma_patch = [] # Variadic macro comma patch
i = 0
@@ -441,7 +441,7 @@ class Preprocessor(object):
rep = [copy.copy(_x) for _x in macro.value]
# Make string expansion patches. These do not alter the length of the replacement sequence
-
+
str_expansion = {}
for argnum, i in macro.str_patch:
if argnum not in str_expansion:
@@ -459,7 +459,7 @@ class Preprocessor(object):
# Make all other patches. The order of these matters. It is assumed that the patch list
# has been sorted in reverse order of patch location since replacements will cause the
# size of the replacement sequence to expand from the patch point.
-
+
expanded = { }
for ptype, argnum, i in macro.patch:
# Concatenation. Argument is left unexpanded
@@ -496,7 +496,7 @@ class Preprocessor(object):
if t.value in self.macros and t.value not in expanded:
# Yes, we found a macro match
expanded[t.value] = True
-
+
m = self.macros[t.value]
if not m.arglist:
# A simple macro
@@ -528,7 +528,7 @@ class Preprocessor(object):
else:
args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
del args[len(m.arglist):]
-
+
# Get macro replacement text
rep = self.macro_expand_args(m,args)
rep = self.expand_macros(rep,expanded)
@@ -547,13 +547,13 @@ class Preprocessor(object):
elif t.value == '__LINE__':
t.type = self.t_INTEGER
t.value = self.t_INTEGER_TYPE(t.lineno)
-
+
i += 1
return tokens
- # ----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# evalexpr()
- #
+ #
# Evaluate an expression token sequence for the purposes of evaluating
# integral expressions.
# ----------------------------------------------------------------------
@@ -600,7 +600,7 @@ class Preprocessor(object):
tokens[i].value = str(tokens[i].value)
while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
tokens[i].value = tokens[i].value[:-1]
-
+
expr = "".join([str(x.value) for x in tokens])
expr = expr.replace("&&"," and ")
expr = expr.replace("||"," or ")
@@ -625,7 +625,7 @@ class Preprocessor(object):
if not source:
source = ""
-
+
self.define("__FILE__ \"%s\"" % source)
self.source = source
@@ -644,7 +644,7 @@ class Preprocessor(object):
for tok in x:
if tok.type in self.t_WS and '\n' in tok.value:
chunk.append(tok)
-
+
dirtokens = self.tokenstrip(x[i+1:])
if dirtokens:
name = dirtokens[0].value
@@ -652,7 +652,7 @@ class Preprocessor(object):
else:
name = ""
args = []
-
+
if name == 'define':
if enable:
for tok in self.expand_macros(chunk):
@@ -712,7 +712,7 @@ class Preprocessor(object):
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
-
+
elif name == 'else':
if ifstack:
if ifstack[-1][0]:
@@ -882,7 +882,7 @@ class Preprocessor(object):
def parse(self,input,source=None,ignore={}):
self.ignore = ignore
self.parser = self.parsegen(input,source)
-
+
# ----------------------------------------------------------------------
# token()
#
@@ -912,14 +912,3 @@ if __name__ == '__main__':
tok = p.token()
if not tok: break
print(p.source, tok)
-
-
-
-
-
-
-
-
-
-
-
diff --git a/ply/ctokens.py b/ply/ctokens.py
index f6f6952..b265e59 100644
--- a/ply/ctokens.py
+++ b/ply/ctokens.py
@@ -16,7 +16,7 @@ tokens = [
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
-
+
# Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
@@ -29,7 +29,7 @@ tokens = [
# Ternary operator (?)
'TERNARY',
-
+
# Delimeters ( ) [ ] { } , . ; :
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
@@ -39,7 +39,7 @@ tokens = [
# Ellipsis (...)
'ELLIPSIS',
]
-
+
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
@@ -125,9 +125,3 @@ def t_CPPCOMMENT(t):
r'//.*\n'
t.lexer.lineno += 1
return t
-
-
-
-
-
-
diff --git a/ply/lex.py b/ply/lex.py
index 65deefe..f3351ba 100644
--- a/ply/lex.py
+++ b/ply/lex.py
@@ -184,7 +184,7 @@ class Lexer:
tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
- # Rewrite the lexstatere table, replacing function objects with function names
+ # Rewrite the lexstatere table, replacing function objects with function names
tabre = {}
for statename, lre in self.lexstatere.items():
titem = []
@@ -535,7 +535,7 @@ def _statetoken(s, names):
for i, part in enumerate(parts[1:], 1):
if part not in names and part != 'ANY':
break
-
+
if i > 1:
states = tuple(parts[1:i])
else:
@@ -1096,4 +1096,3 @@ def TOKEN(r):
# Alternative spelling of the TOKEN decorator
Token = TOKEN
-
diff --git a/ply/ygen.py b/ply/ygen.py
index 62a2c35..03b9318 100644
--- a/ply/ygen.py
+++ b/ply/ygen.py
@@ -67,8 +67,3 @@ def main():
if __name__ == '__main__':
main()
-
-
-
-
-
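To confirm that no trailing whitespace survives after applying the patch, a quick check could look like the sketch below; this script is hypothetical and not part of the repository:

    # Report any line still ending in spaces or tabs; exit non-zero if found.
    import re
    import sys

    dirty = False
    for path in sys.argv[1:]:
        with open(path) as f:
            for num, line in enumerate(f, 1):
                if re.search(r'[ \t]+\n?$', line):
                    print('%s:%d: trailing whitespace' % (path, num))
                    dirty = True
    sys.exit(1 if dirty else 0)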