path: root/example
author    David Beazley <dave@dabeaz.com>  2020-02-22 15:57:54 -0600
committer David Beazley <dave@dabeaz.com>  2020-02-22 15:57:54 -0600
commit    1fac9fed647909b92f3779dd34beb8564f6f247b (patch)
tree      c520f1c56791c541105c1023c8dd68d99853ad8b /example
parent    1321375e013425958ea090b55aecae0a4b7face6 (diff)
download  ply-1fac9fed647909b92f3779dd34beb8564f6f247b.tar.gz
Massive refactoring/cleanup
Diffstat (limited to 'example')
-rw-r--r--   example/BASIC/basic.py                7
-rw-r--r--   example/GardenSnake/GardenSnake.py  777
-rw-r--r--   example/GardenSnake/README            5
-rw-r--r--   example/README                        1
-rw-r--r--   example/calc/calc.py                 14
-rw-r--r--   example/calcdebug/calc.py             7
-rw-r--r--   example/calceof/calc.py               9
-rwxr-xr-x   example/classcalc/calc.py            13
-rw-r--r--   example/closurecalc/calc.py           5
-rw-r--r--   example/hedit/hedit.py               48
-rwxr-xr-x   example/newclasscalc/calc.py        167
-rw-r--r--   example/optcalc/README                9
-rw-r--r--   example/optcalc/calc.py             134
-rw-r--r--   example/unicalc/calc.py             133
-rw-r--r--   example/yply/yparse.py                2
15 files changed, 17 insertions, 1314 deletions
diff --git a/example/BASIC/basic.py b/example/BASIC/basic.py
index 17687b1..8a8a500 100644
--- a/example/BASIC/basic.py
+++ b/example/BASIC/basic.py
@@ -4,9 +4,6 @@
import sys
sys.path.insert(0, "../..")
-if sys.version_info[0] >= 3:
- raw_input = input
-
import basiclex
import basparse
import basinterp
@@ -36,9 +33,9 @@ else:
# Specifying a line number with no code deletes that line from
# the program.
-while 1:
+while True:
try:
- line = raw_input("[BASIC] ")
+ line = input("[BASIC] ")
except EOFError:
raise SystemExit
if not line:
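
The same modernization recurs across the files in this commit: the Python 2 shim (raw_input = input) is deleted and the loop is written directly against the Python 3 built-in. A minimal sketch of the resulting REPL shape (the basiclex/basparse plumbing is omitted; only the input-handling idiom is shown):

    while True:
        try:
            line = input("[BASIC] ")   # Python 3: input() replaces raw_input()
        except EOFError:
            raise SystemExit
        if not line:
            continue                   # blank line: prompt again
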
diff --git a/example/GardenSnake/GardenSnake.py b/example/GardenSnake/GardenSnake.py
deleted file mode 100644
index 8b493b4..0000000
--- a/example/GardenSnake/GardenSnake.py
+++ /dev/null
@@ -1,777 +0,0 @@
-# GardenSnake - a parser generator demonstration program
-#
-# This implements a modified version of a subset of Python:
-# - only 'def', 'return' and 'if' statements
-# - 'if' only has 'then' clause (no elif nor else)
-# - single-quoted strings only, content in raw format
-# - numbers are decimal.Decimal instances (not integers or floats)
-# - no print statement; use the built-in 'print' function
-# - only < > == + - / * implemented (and unary + -)
-# - assignment and tuple assignment work
-# - no generators of any sort
-# - no ... well, not quite a lot
-
-# Why? I'm thinking about a new indentation-based configuration
-# language for a project and wanted to figure out how to do it. Once
-# I got that working I needed a way to test it out. My original AST
-# was dumb so I decided to target Python's AST and compile it into
-# Python code. Plus, it's pretty cool that it only took a day or so
-# from sitting down with Ply to having working code.
-
-# This uses David Beazley's Ply from http://www.dabeaz.com/ply/
-
-# This work is hereby released into the Public Domain. To view a copy of
-# the public domain dedication, visit
-# http://creativecommons.org/licenses/publicdomain/ or send a letter to
-# Creative Commons, 543 Howard Street, 5th Floor, San Francisco,
-# California, 94105, USA.
-#
-# Portions of this work are derived from Python's Grammar definition
-# and may be covered under the Python copyright and license
-#
-# Andrew Dalke / Dalke Scientific Software, LLC
-# 30 August 2006 / Cape Town, South Africa
-
-# Changelog:
-# 30 August - added link to CC license; removed the "swapcase" encoding
-
-# Modifications for inclusion in PLY distribution
-import sys
-sys.path.insert(0, "../..")
-from ply import *
-
-##### Lexer ######
-#import lex
-import decimal
-
-tokens = (
- 'DEF',
- 'IF',
- 'NAME',
- 'NUMBER', # Python decimals
- 'STRING', # single quoted strings only; syntax of raw strings
- 'LPAR',
- 'RPAR',
- 'COLON',
- 'EQ',
- 'ASSIGN',
- 'LT',
- 'GT',
- 'PLUS',
- 'MINUS',
- 'MULT',
- 'DIV',
- 'RETURN',
- 'WS',
- 'NEWLINE',
- 'COMMA',
- 'SEMICOLON',
- 'INDENT',
- 'DEDENT',
- 'ENDMARKER',
-)
-
-#t_NUMBER = r'\d+'
-# taken from decimal.py but without the leading sign
-
-
-def t_NUMBER(t):
- r"""(\d+(\.\d*)?|\.\d+)([eE][-+]? \d+)?"""
- t.value = decimal.Decimal(t.value)
- return t
-
-
-def t_STRING(t):
- r"'([^\\']+|\\'|\\\\)*'" # I think this is right ...
- t.value = t.value[1:-1].decode("string-escape") # .swapcase() # for fun
- return t
-
-t_COLON = r':'
-t_EQ = r'=='
-t_ASSIGN = r'='
-t_LT = r'<'
-t_GT = r'>'
-t_PLUS = r'\+'
-t_MINUS = r'-'
-t_MULT = r'\*'
-t_DIV = r'/'
-t_COMMA = r','
-t_SEMICOLON = r';'
-
-# Ply nicely documented how to do this.
-
-RESERVED = {
- "def": "DEF",
- "if": "IF",
- "return": "RETURN",
-}
-
-
-def t_NAME(t):
- r'[a-zA-Z_][a-zA-Z0-9_]*'
- t.type = RESERVED.get(t.value, "NAME")
- return t
-
-# Putting this before t_WS lets it consume lines with only comments in
-# them so the latter code never sees the WS part. Not consuming the
-# newline. Needed for "if 1: #comment"
-
-
-def t_comment(t):
- r"[ ]*\043[^\n]*" # \043 is '#'
- pass
-
-
-# Whitespace
-def t_WS(t):
- r' [ ]+ '
- if t.lexer.at_line_start and t.lexer.paren_count == 0:
- return t
-
-# Don't generate newline tokens when inside of parenthesis, eg
-# a = (1,
-# 2, 3)
-
-
-def t_newline(t):
- r'\n+'
- t.lexer.lineno += len(t.value)
- t.type = "NEWLINE"
- if t.lexer.paren_count == 0:
- return t
-
-
-def t_LPAR(t):
- r'\('
- t.lexer.paren_count += 1
- return t
-
-
-def t_RPAR(t):
- r'\)'
- # check for underflow? should be the job of the parser
- t.lexer.paren_count -= 1
- return t
-
-
-def t_error(t):
- raise SyntaxError("Unknown symbol %r" % (t.value[0],))
- print "Skipping", repr(t.value[0])
- t.lexer.skip(1)
-
-# I implemented INDENT / DEDENT generation as a post-processing filter
-
-# The original lex token stream contains WS and NEWLINE characters.
-# WS will only occur before any other tokens on a line.
-
-# I have three filters. One tags tokens by adding two attributes.
-# "must_indent" is True if the token must be indented from the
-# previous code. The other is "at_line_start" which is True for WS
-# and the first non-WS/non-NEWLINE on a line. It flags the check to
-# see if the new line has changed indentation level.
-
-# Python's syntax has three INDENT states
-# 0) no colon hence no need to indent
-# 1) "if 1: go()" - simple statements have a COLON but no need for an indent
-# 2) "if 1:\n go()" - complex statements have a COLON NEWLINE and must indent
-NO_INDENT = 0
-MAY_INDENT = 1
-MUST_INDENT = 2
-
-# only care about whitespace at the start of a line
-
-
-def track_tokens_filter(lexer, tokens):
- lexer.at_line_start = at_line_start = True
- indent = NO_INDENT
- saw_colon = False
- for token in tokens:
- token.at_line_start = at_line_start
-
- if token.type == "COLON":
- at_line_start = False
- indent = MAY_INDENT
- token.must_indent = False
-
- elif token.type == "NEWLINE":
- at_line_start = True
- if indent == MAY_INDENT:
- indent = MUST_INDENT
- token.must_indent = False
-
- elif token.type == "WS":
- assert token.at_line_start == True
- at_line_start = True
- token.must_indent = False
-
- else:
- # A real token; only indent after COLON NEWLINE
- if indent == MUST_INDENT:
- token.must_indent = True
- else:
- token.must_indent = False
- at_line_start = False
- indent = NO_INDENT
-
- yield token
- lexer.at_line_start = at_line_start
-
-
-def _new_token(type, lineno):
- tok = lex.LexToken()
- tok.type = type
- tok.value = None
- tok.lineno = lineno
- return tok
-
-# Synthesize a DEDENT tag
-
-
-def DEDENT(lineno):
- return _new_token("DEDENT", lineno)
-
-# Synthesize an INDENT tag
-
-
-def INDENT(lineno):
- return _new_token("INDENT", lineno)
-
-
-# Track the indentation level and emit the right INDENT / DEDENT events.
-def indentation_filter(tokens):
- # A stack of indentation levels; will never pop item 0
- levels = [0]
- token = None
- depth = 0
- prev_was_ws = False
- for token in tokens:
- # if 1:
- # print "Process", token,
- # if token.at_line_start:
- # print "at_line_start",
- # if token.must_indent:
- # print "must_indent",
- # print
-
- # WS only occurs at the start of the line
- # There may be WS followed by NEWLINE so
- # only track the depth here. Don't indent/dedent
- # until there's something real.
- if token.type == "WS":
- assert depth == 0
- depth = len(token.value)
- prev_was_ws = True
- # WS tokens are never passed to the parser
- continue
-
- if token.type == "NEWLINE":
- depth = 0
- if prev_was_ws or token.at_line_start:
- # ignore blank lines
- continue
- # pass the other cases on through
- yield token
- continue
-
- # then it must be a real token (not WS, not NEWLINE)
- # which can affect the indentation level
-
- prev_was_ws = False
- if token.must_indent:
- # The current depth must be larger than the previous level
- if not (depth > levels[-1]):
- raise IndentationError("expected an indented block")
-
- levels.append(depth)
- yield INDENT(token.lineno)
-
- elif token.at_line_start:
- # Must be on the same level or one of the previous levels
- if depth == levels[-1]:
- # At the same level
- pass
- elif depth > levels[-1]:
- raise IndentationError(
- "indentation increase but not in new block")
- else:
- # Back up; but only if it matches a previous level
- try:
- i = levels.index(depth)
- except ValueError:
- raise IndentationError("inconsistent indentation")
- for _ in range(i + 1, len(levels)):
- yield DEDENT(token.lineno)
- levels.pop()
-
- yield token
-
- ### Finished processing ###
-
- # Must dedent any remaining levels
- if len(levels) > 1:
- assert token is not None
- for _ in range(1, len(levels)):
- yield DEDENT(token.lineno)
-
-
-# The top-level filter adds an ENDMARKER, if requested.
-# Python's grammar uses it.
-def filter(lexer, add_endmarker=True):
- token = None
- tokens = iter(lexer.token, None)
- tokens = track_tokens_filter(lexer, tokens)
- for token in indentation_filter(tokens):
- yield token
-
- if add_endmarker:
- lineno = 1
- if token is not None:
- lineno = token.lineno
- yield _new_token("ENDMARKER", lineno)
-
-# Combine Ply and my filters into a new lexer
-
-
-class IndentLexer(object):
-
- def __init__(self, debug=0, optimize=0, lextab='lextab', reflags=0):
- self.lexer = lex.lex(debug=debug, optimize=optimize,
- lextab=lextab, reflags=reflags)
- self.token_stream = None
-
- def input(self, s, add_endmarker=True):
- self.lexer.paren_count = 0
- self.lexer.input(s)
- self.token_stream = filter(self.lexer, add_endmarker)
-
- def token(self):
- try:
- return self.token_stream.next()
- except StopIteration:
- return None
-
-########## Parser (tokens -> AST) ######
-
-# also part of Ply
-#import yacc
-
-# I use the Python AST
-from compiler import ast
-
-# Helper function
-
-
-def Assign(left, right):
- names = []
- if isinstance(left, ast.Name):
- # Single assignment on left
- return ast.Assign([ast.AssName(left.name, 'OP_ASSIGN')], right)
- elif isinstance(left, ast.Tuple):
- # List of things - make sure they are Name nodes
- names = []
- for child in left.getChildren():
- if not isinstance(child, ast.Name):
- raise SyntaxError("that assignment not supported")
- names.append(child.name)
- ass_list = [ast.AssName(name, 'OP_ASSIGN') for name in names]
- return ast.Assign([ast.AssTuple(ass_list)], right)
- else:
- raise SyntaxError("Can't do that yet")
-
-
-# The grammar comments come from Python's Grammar/Grammar file
-
-# NB: compound_stmt in single_input is followed by extra NEWLINE!
-# file_input: (NEWLINE | stmt)* ENDMARKER
-def p_file_input_end(p):
- """file_input_end : file_input ENDMARKER"""
- p[0] = ast.Stmt(p[1])
-
-
-def p_file_input(p):
- """file_input : file_input NEWLINE
- | file_input stmt
- | NEWLINE
- | stmt"""
- if isinstance(p[len(p) - 1], basestring):
- if len(p) == 3:
- p[0] = p[1]
- else:
- p[0] = [] # p == 2 --> only a blank line
- else:
- if len(p) == 3:
- p[0] = p[1] + p[2]
- else:
- p[0] = p[1]
-
-
-# funcdef: [decorators] 'def' NAME parameters ':' suite
-# ignoring decorators
-def p_funcdef(p):
- "funcdef : DEF NAME parameters COLON suite"
- p[0] = ast.Function(None, p[2], tuple(p[3]), (), 0, None, p[5])
-
-# parameters: '(' [varargslist] ')'
-
-
-def p_parameters(p):
- """parameters : LPAR RPAR
- | LPAR varargslist RPAR"""
- if len(p) == 3:
- p[0] = []
- else:
- p[0] = p[2]
-
-
-# varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME) |
-# highly simplified
-def p_varargslist(p):
- """varargslist : varargslist COMMA NAME
- | NAME"""
- if len(p) == 4:
- p[0] = p[1] + p[3]
- else:
- p[0] = [p[1]]
-
-# stmt: simple_stmt | compound_stmt
-
-
-def p_stmt_simple(p):
- """stmt : simple_stmt"""
- # simple_stmt is a list
- p[0] = p[1]
-
-
-def p_stmt_compound(p):
- """stmt : compound_stmt"""
- p[0] = [p[1]]
-
-# simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
-
-
-def p_simple_stmt(p):
- """simple_stmt : small_stmts NEWLINE
- | small_stmts SEMICOLON NEWLINE"""
- p[0] = p[1]
-
-
-def p_small_stmts(p):
- """small_stmts : small_stmts SEMICOLON small_stmt
- | small_stmt"""
- if len(p) == 4:
- p[0] = p[1] + [p[3]]
- else:
- p[0] = [p[1]]
-
-# small_stmt: expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
-# import_stmt | global_stmt | exec_stmt | assert_stmt
-
-
-def p_small_stmt(p):
- """small_stmt : flow_stmt
- | expr_stmt"""
- p[0] = p[1]
-
-# expr_stmt: testlist (augassign (yield_expr|testlist) |
-# ('=' (yield_expr|testlist))*)
-# augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
-# '<<=' | '>>=' | '**=' | '//=')
-
-
-def p_expr_stmt(p):
- """expr_stmt : testlist ASSIGN testlist
- | testlist """
- if len(p) == 2:
- # a list of expressions
- p[0] = ast.Discard(p[1])
- else:
- p[0] = Assign(p[1], p[3])
-
-
-def p_flow_stmt(p):
- "flow_stmt : return_stmt"
- p[0] = p[1]
-
-# return_stmt: 'return' [testlist]
-
-
-def p_return_stmt(p):
- "return_stmt : RETURN testlist"
- p[0] = ast.Return(p[2])
-
-
-def p_compound_stmt(p):
- """compound_stmt : if_stmt
- | funcdef"""
- p[0] = p[1]
-
-
-def p_if_stmt(p):
- 'if_stmt : IF test COLON suite'
- p[0] = ast.If([(p[2], p[4])], None)
-
-
-def p_suite(p):
- """suite : simple_stmt
- | NEWLINE INDENT stmts DEDENT"""
- if len(p) == 2:
- p[0] = ast.Stmt(p[1])
- else:
- p[0] = ast.Stmt(p[3])
-
-
-def p_stmts(p):
- """stmts : stmts stmt
- | stmt"""
- if len(p) == 3:
- p[0] = p[1] + p[2]
- else:
- p[0] = p[1]
-
-# Not using Python's approach because Ply supports precedence
-
-# comparison: expr (comp_op expr)*
-# arith_expr: term (('+'|'-') term)*
-# term: factor (('*'|'/'|'%'|'//') factor)*
-# factor: ('+'|'-'|'~') factor | power
-# comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
-
-
-def make_lt_compare((left, right)):
- return ast.Compare(left, [('<', right), ])
-
-
-def make_gt_compare((left, right)):
- return ast.Compare(left, [('>', right), ])
-
-
-def make_eq_compare((left, right)):
- return ast.Compare(left, [('==', right), ])
-
-
-binary_ops = {
- "+": ast.Add,
- "-": ast.Sub,
- "*": ast.Mul,
- "/": ast.Div,
- "<": make_lt_compare,
- ">": make_gt_compare,
- "==": make_eq_compare,
-}
-unary_ops = {
- "+": ast.UnaryAdd,
- "-": ast.UnarySub,
-}
-precedence = (
- ("left", "EQ", "GT", "LT"),
- ("left", "PLUS", "MINUS"),
- ("left", "MULT", "DIV"),
-)
-
-
-def p_comparison(p):
- """comparison : comparison PLUS comparison
- | comparison MINUS comparison
- | comparison MULT comparison
- | comparison DIV comparison
- | comparison LT comparison
- | comparison EQ comparison
- | comparison GT comparison
- | PLUS comparison
- | MINUS comparison
- | power"""
- if len(p) == 4:
- p[0] = binary_ops[p[2]]((p[1], p[3]))
- elif len(p) == 3:
- p[0] = unary_ops[p[1]](p[2])
- else:
- p[0] = p[1]
-
-# power: atom trailer* ['**' factor]
-# trailers enable function calls. I only allow one level of calls
-# so this is 'trailer'
-
-
-def p_power(p):
- """power : atom
- | atom trailer"""
- if len(p) == 2:
- p[0] = p[1]
- else:
- if p[2][0] == "CALL":
- p[0] = ast.CallFunc(p[1], p[2][1], None, None)
- else:
- raise AssertionError("not implemented")
-
-
-def p_atom_name(p):
- """atom : NAME"""
- p[0] = ast.Name(p[1])
-
-
-def p_atom_number(p):
- """atom : NUMBER
- | STRING"""
- p[0] = ast.Const(p[1])
-
-
-def p_atom_tuple(p):
- """atom : LPAR testlist RPAR"""
- p[0] = p[2]
-
-# trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
-
-
-def p_trailer(p):
- "trailer : LPAR arglist RPAR"
- p[0] = ("CALL", p[2])
-
-# testlist: test (',' test)* [',']
-# Contains shift/reduce error
-
-
-def p_testlist(p):
- """testlist : testlist_multi COMMA
- | testlist_multi """
- if len(p) == 2:
- p[0] = p[1]
- else:
- # May need to promote singleton to tuple
- if isinstance(p[1], list):
- p[0] = p[1]
- else:
- p[0] = [p[1]]
- # Convert into a tuple?
- if isinstance(p[0], list):
- p[0] = ast.Tuple(p[0])
-
-
-def p_testlist_multi(p):
- """testlist_multi : testlist_multi COMMA test
- | test"""
- if len(p) == 2:
- # singleton
- p[0] = p[1]
- else:
- if isinstance(p[1], list):
- p[0] = p[1] + [p[3]]
- else:
- # singleton -> tuple
- p[0] = [p[1], p[3]]
-
-
-# test: or_test ['if' or_test 'else' test] | lambdef
-# as I don't support 'and', 'or', and 'not' this works down to 'comparison'
-def p_test(p):
- "test : comparison"
- p[0] = p[1]
-
-
-# arglist: (argument ',')* (argument [',']| '*' test [',' '**' test] | '**' test)
-# XXX INCOMPLETE: this doesn't allow the trailing comma
-def p_arglist(p):
- """arglist : arglist COMMA argument
- | argument"""
- if len(p) == 4:
- p[0] = p[1] + [p[3]]
- else:
- p[0] = [p[1]]
-
-# argument: test [gen_for] | test '=' test # Really [keyword '='] test
-
-
-def p_argument(p):
- "argument : test"
- p[0] = p[1]
-
-
-def p_error(p):
- # print "Error!", repr(p)
- raise SyntaxError(p)
-
-
-class GardenSnakeParser(object):
-
- def __init__(self, lexer=None):
- if lexer is None:
- lexer = IndentLexer()
- self.lexer = lexer
- self.parser = yacc.yacc(start="file_input_end")
-
- def parse(self, code):
- self.lexer.input(code)
- result = self.parser.parse(lexer=self.lexer)
- return ast.Module(None, result)
-
-
-###### Code generation ######
-
-from compiler import misc, syntax, pycodegen
-
-
-class GardenSnakeCompiler(object):
-
- def __init__(self):
- self.parser = GardenSnakeParser()
-
- def compile(self, code, filename="<string>"):
- tree = self.parser.parse(code)
- # print tree
- misc.set_filename(filename, tree)
- syntax.check(tree)
- gen = pycodegen.ModuleCodeGenerator(tree)
- code = gen.getCode()
- return code
-
-####### Test code #######
-
-compile = GardenSnakeCompiler().compile
-
-code = r"""
-
-print('LET\'S TRY THIS \\OUT')
-
-#Comment here
-def x(a):
- print('called with',a)
- if a == 1:
- return 2
- if a*2 > 10: return 999 / 4
- # Another comment here
-
- return a+2*3
-
-ints = (1, 2,
- 3, 4,
-5)
-print('multiline-expression', ints)
-
-t = 4+1/3*2+6*(9-5+1)
-print('precedence test; should be 34+2/3:', t, t==(34+2/3))
-
-print('numbers', 1,2,3,4,5)
-if 1:
- 8
- a=9
- print(x(a))
-
-print(x(1))
-print(x(2))
-print(x(8),'3')
-print('this is decimal', 1/5)
-print('BIG DECIMAL', 1.234567891234567e12345)
-
-"""
-
-# Set up the GardenSnake run-time environment
-
-
-def print_(*args):
- print "-->", " ".join(map(str, args))
-
-globals()["print"] = print_
-
-compiled_code = compile(code)
-
-exec compiled_code in globals()
-print "Done"
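
The most reusable idea in the deleted GardenSnake file is generating INDENT/DEDENT as a post-processing generator over the raw token stream, keeping a stack of indentation levels. A condensed, PLY-independent sketch of that stack logic (token objects reduced to (depth, payload) pairs purely for illustration):

    def indent_filter(items):
        """Yield 'INDENT'/'DEDENT' markers around payloads, mirroring
        the levels-stack logic of the deleted indentation_filter()."""
        levels = [0]                       # never pop the base level
        for depth, payload in items:
            if depth > levels[-1]:         # deeper: open one block
                levels.append(depth)
                yield 'INDENT'
            else:
                while depth < levels[-1]:  # shallower: close blocks
                    levels.pop()
                    yield 'DEDENT'
                if depth != levels[-1]:
                    raise IndentationError('inconsistent indentation')
            yield payload
        while len(levels) > 1:             # close anything still open at EOF
            levels.pop()
            yield 'DEDENT'
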
diff --git a/example/GardenSnake/README b/example/GardenSnake/README
deleted file mode 100644
index 4d8be2d..0000000
--- a/example/GardenSnake/README
+++ /dev/null
@@ -1,5 +0,0 @@
-This example is Andrew Dalke's GardenSnake language. It shows how to process an
-indentation-based language like Python. Further details can be found here:
-
-http://dalkescientific.com/writings/diary/archive/2006/08/30/gardensnake_language.html
-
diff --git a/example/README b/example/README
index 63519b5..a7ec6e8 100644
--- a/example/README
+++ b/example/README
@@ -5,6 +5,5 @@ Simple examples:
Complex examples
ansic - ANSI C grammar from K&R
BASIC - A small BASIC interpreter
- GardenSnake - A simple python-like language
yply - Converts Unix yacc files to PLY programs.
diff --git a/example/calc/calc.py b/example/calc/calc.py
index 824c3d7..406d83c 100644
--- a/example/calc/calc.py
+++ b/example/calc/calc.py
@@ -8,9 +8,6 @@
import sys
sys.path.insert(0, "../..")
-if sys.version_info[0] >= 3:
- raw_input = input
-
tokens = (
'NAME', 'NUMBER',
)
@@ -29,19 +26,17 @@ def t_NUMBER(t):
t_ignore = " \t"
-
def t_newline(t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
-
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Build the lexer
import ply.lex as lex
-lex.lex()
+lexer = lex.lex()
# Parsing rules
@@ -54,7 +49,6 @@ precedence = (
# dictionary of names
names = {}
-
def p_statement_assign(p):
'statement : NAME "=" expression'
names[p[1]] = p[3]
@@ -111,11 +105,11 @@ def p_error(p):
print("Syntax error at EOF")
import ply.yacc as yacc
-yacc.yacc()
+parser = yacc.yacc()
-while 1:
+while True:
try:
- s = raw_input('calc > ')
+ s = input('calc > ')
except EOFError:
break
if not s:
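
Capturing the objects returned by lex.lex() and yacc.yacc(), as this hunk does, replaces reliance on module-level state. The tail of the REPL can then invoke the parser explicitly (a sketch using the lexer/parser names bound above):

    while True:
        try:
            s = input('calc > ')
        except EOFError:
            break
        if not s:
            continue
        parser.parse(s, lexer=lexer)   # pass the captured lexer explicitly
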
diff --git a/example/calcdebug/calc.py b/example/calcdebug/calc.py
index 06831e2..386000e 100644
--- a/example/calcdebug/calc.py
+++ b/example/calcdebug/calc.py
@@ -8,9 +8,6 @@
import sys
sys.path.insert(0, "../..")
-if sys.version_info[0] >= 3:
- raw_input = input
-
tokens = (
'NAME', 'NUMBER',
)
@@ -119,9 +116,9 @@ logging.basicConfig(
filename="parselog.txt"
)
-while 1:
+while True:
try:
- s = raw_input('calc > ')
+ s = input('calc > ')
except EOFError:
break
if not s:
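
The hunk above only swaps the input function; the debugging mechanism around it is PLY's run-time trace, enabled by handing a logger to parse(). A sketch of the idiom this example is built on (the logger configuration mirrors the visible logging.basicConfig call):

    import logging
    log = logging.getLogger()
    result = yacc.parse(s, debug=log)   # emits the shift/reduce trace via the logger
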
diff --git a/example/calceof/calc.py b/example/calceof/calc.py
index 22b39a4..7bb7e0f 100644
--- a/example/calceof/calc.py
+++ b/example/calceof/calc.py
@@ -8,9 +8,6 @@
import sys
sys.path.insert(0, "../..")
-if sys.version_info[0] >= 3:
- raw_input = input
-
tokens = (
'NAME', 'NUMBER',
)
@@ -36,7 +33,7 @@ def t_newline(t):
def t_eof(t):
- more = raw_input('... ')
+ more = input('... ')
if more:
t.lexer.input(more + '\n')
return t.lexer.token()
@@ -122,9 +119,9 @@ def p_error(p):
import ply.yacc as yacc
yacc.yacc()
-while 1:
+while True:
try:
- s = raw_input('calc > ')
+ s = input('calc > ')
except EOFError:
break
if not s:
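
t_eof is PLY's hook for end-of-input: returning a token resumes lexing, returning None ends it. The hunk above keeps that logic and only modernizes the prompt; restated whole for context (a condensation of the surrounding file, not new behavior):

    def t_eof(t):
        more = input('... ')             # ask the user for a continuation line
        if more:
            t.lexer.input(more + '\n')   # feed the new text to the lexer
            return t.lexer.token()       # resume tokenizing
        return None                      # genuine end of input
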
diff --git a/example/classcalc/calc.py b/example/classcalc/calc.py
index ada4afd..6f35195 100755
--- a/example/classcalc/calc.py
+++ b/example/classcalc/calc.py
@@ -12,9 +12,6 @@
import sys
sys.path.insert(0, "../..")
-if sys.version_info[0] >= 3:
- raw_input = input
-
import ply.lex as lex
import ply.yacc as yacc
import os
@@ -36,20 +33,18 @@ class Parser:
except:
modname = "parser" + "_" + self.__class__.__name__
self.debugfile = modname + ".dbg"
- self.tabmodule = modname + "_" + "parsetab"
- # print self.debugfile, self.tabmodule
+ # print self.debugfile
# Build the lexer and parser
lex.lex(module=self, debug=self.debug)
yacc.yacc(module=self,
debug=self.debug,
- debugfile=self.debugfile,
- tabmodule=self.tabmodule)
+ debugfile=self.debugfile)
def run(self):
- while 1:
+ while True:
try:
- s = raw_input('calc > ')
+ s = input('calc > ')
except EOFError:
break
if not s:
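
This hunk also drops the tabmodule plumbing, consistent with the refactor no longer caching parser tables to disk. Building a class-based lexer/parser then reduces to the following (a sketch assuming the post-refactor keyword set shown in the hunk):

    lex.lex(module=self, debug=self.debug)
    yacc.yacc(module=self, debug=self.debug, debugfile=self.debugfile)
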
diff --git a/example/closurecalc/calc.py b/example/closurecalc/calc.py
index 6031b05..59c9d6f 100644
--- a/example/closurecalc/calc.py
+++ b/example/closurecalc/calc.py
@@ -9,9 +9,6 @@
import sys
sys.path.insert(0, "../..")
-if sys.version_info[0] >= 3:
- raw_input = input
-
# Make a calculator function
@@ -124,7 +121,7 @@ calc = make_calculator()
while True:
try:
- s = raw_input("calc > ")
+ s = input("calc > ")
except EOFError:
break
r = calc(s)
diff --git a/example/hedit/hedit.py b/example/hedit/hedit.py
deleted file mode 100644
index 32da745..0000000
--- a/example/hedit/hedit.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# -----------------------------------------------------------------------------
-# hedit.py
-#
-# Parsing of Fortran H Edit descriptions (Contributed by Pearu Peterson)
-#
-# These tokens can't be easily tokenized because they are of the following
-# form:
-#
-# nHc1...cn
-#
-# where n is a positive integer and c1 ... cn are characters.
-#
-# This example shows how to modify the state of the lexer to parse
-# such tokens
-# -----------------------------------------------------------------------------
-
-import sys
-sys.path.insert(0, "../..")
-
-
-tokens = (
- 'H_EDIT_DESCRIPTOR',
-)
-
-# Tokens
-t_ignore = " \t\n"
-
-
-def t_H_EDIT_DESCRIPTOR(t):
- r"\d+H.*" # This grabs all of the remaining text
- i = t.value.index('H')
- n = eval(t.value[:i])
-
- # Adjust the tokenizing position
- t.lexer.lexpos -= len(t.value) - (i + 1 + n)
-
- t.value = t.value[i + 1:i + 1 + n]
- return t
-
-
-def t_error(t):
- print("Illegal character '%s'" % t.value[0])
- t.lexer.skip(1)
-
-# Build the lexer
-import ply.lex as lex
-lex.lex()
-lex.runmain()
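
The technique being deleted here is still worth recording: a token rule may over-match and then move t.lexer.lexpos backwards so the token only consumes its declared length. A sketch of the same rule with one unsafe detail swapped out (int() in place of eval() for the count):

    def t_H_EDIT_DESCRIPTOR(t):
        r"\d+H.*"                        # over-matches to the end of the line
        i = t.value.index('H')
        n = int(t.value[:i])             # length prefix before the 'H'
        # rewind: give back everything past the n declared characters
        t.lexer.lexpos -= len(t.value) - (i + 1 + n)
        t.value = t.value[i + 1:i + 1 + n]
        return t
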
diff --git a/example/newclasscalc/calc.py b/example/newclasscalc/calc.py
deleted file mode 100755
index 43c9506..0000000
--- a/example/newclasscalc/calc.py
+++ /dev/null
@@ -1,167 +0,0 @@
-#!/usr/bin/env python
-
-# -----------------------------------------------------------------------------
-# calc.py
-#
-# A simple calculator with variables. This is from O'Reilly's
-# "Lex and Yacc", p. 63.
-#
-# Class-based example contributed to PLY by David McNab.
-#
-# Modified to use new-style classes. Test case.
-# -----------------------------------------------------------------------------
-
-import sys
-sys.path.insert(0, "../..")
-
-if sys.version_info[0] >= 3:
- raw_input = input
-
-import ply.lex as lex
-import ply.yacc as yacc
-import os
-
-
-class Parser(object):
- """
- Base class for a lexer/parser that has the rules defined as methods
- """
- tokens = ()
- precedence = ()
-
- def __init__(self, **kw):
- self.debug = kw.get('debug', 0)
- self.names = {}
- try:
- modname = os.path.split(os.path.splitext(__file__)[0])[
- 1] + "_" + self.__class__.__name__
- except:
- modname = "parser" + "_" + self.__class__.__name__
- self.debugfile = modname + ".dbg"
- self.tabmodule = modname + "_" + "parsetab"
- # print self.debugfile, self.tabmodule
-
- # Build the lexer and parser
- lex.lex(module=self, debug=self.debug)
- yacc.yacc(module=self,
- debug=self.debug,
- debugfile=self.debugfile,
- tabmodule=self.tabmodule)
-
- def run(self):
- while 1:
- try:
- s = raw_input('calc > ')
- except EOFError:
- break
- if not s:
- continue
- yacc.parse(s)
-
-
-class Calc(Parser):
-
- tokens = (
- 'NAME', 'NUMBER',
- 'PLUS', 'MINUS', 'EXP', 'TIMES', 'DIVIDE', 'EQUALS',
- 'LPAREN', 'RPAREN',
- )
-
- # Tokens
-
- t_PLUS = r'\+'
- t_MINUS = r'-'
- t_EXP = r'\*\*'
- t_TIMES = r'\*'
- t_DIVIDE = r'/'
- t_EQUALS = r'='
- t_LPAREN = r'\('
- t_RPAREN = r'\)'
- t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
-
- def t_NUMBER(self, t):
- r'\d+'
- try:
- t.value = int(t.value)
- except ValueError:
- print("Integer value too large %s" % t.value)
- t.value = 0
- # print "parsed number %s" % repr(t.value)
- return t
-
- t_ignore = " \t"
-
- def t_newline(self, t):
- r'\n+'
- t.lexer.lineno += t.value.count("\n")
-
- def t_error(self, t):
- print("Illegal character '%s'" % t.value[0])
- t.lexer.skip(1)
-
- # Parsing rules
-
- precedence = (
- ('left', 'PLUS', 'MINUS'),
- ('left', 'TIMES', 'DIVIDE'),
- ('left', 'EXP'),
- ('right', 'UMINUS'),
- )
-
- def p_statement_assign(self, p):
- 'statement : NAME EQUALS expression'
- self.names[p[1]] = p[3]
-
- def p_statement_expr(self, p):
- 'statement : expression'
- print(p[1])
-
- def p_expression_binop(self, p):
- """
- expression : expression PLUS expression
- | expression MINUS expression
- | expression TIMES expression
- | expression DIVIDE expression
- | expression EXP expression
- """
- # print [repr(p[i]) for i in range(0,4)]
- if p[2] == '+':
- p[0] = p[1] + p[3]
- elif p[2] == '-':
- p[0] = p[1] - p[3]
- elif p[2] == '*':
- p[0] = p[1] * p[3]
- elif p[2] == '/':
- p[0] = p[1] / p[3]
- elif p[2] == '**':
- p[0] = p[1] ** p[3]
-
- def p_expression_uminus(self, p):
- 'expression : MINUS expression %prec UMINUS'
- p[0] = -p[2]
-
- def p_expression_group(self, p):
- 'expression : LPAREN expression RPAREN'
- p[0] = p[2]
-
- def p_expression_number(self, p):
- 'expression : NUMBER'
- p[0] = p[1]
-
- def p_expression_name(self, p):
- 'expression : NAME'
- try:
- p[0] = self.names[p[1]]
- except LookupError:
- print("Undefined name '%s'" % p[1])
- p[0] = 0
-
- def p_error(self, p):
- if p:
- print("Syntax error at '%s'" % p.value)
- else:
- print("Syntax error at EOF")
-
-if __name__ == '__main__':
- calc = Calc()
- calc.run()
diff --git a/example/optcalc/README b/example/optcalc/README
deleted file mode 100644
index 53dd5fc..0000000
--- a/example/optcalc/README
+++ /dev/null
@@ -1,9 +0,0 @@
-An example showing how to use Python optimized mode.
-To run:
-
- - First run 'python calc.py'
-
- - Then run 'python -OO calc.py'
-
-If working correctly, the second version should run the
-same way.
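
The reason this README existed: python -OO strips docstrings, and PLY reads its token and grammar rules from docstrings, so optimized mode only worked against tables generated beforehand with optimize enabled. The deleted calc.py below did exactly that:

    lex.lex(optimize=1)     # reuse generated tables; tolerate stripped docstrings
    yacc.yacc(optimize=1)
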
diff --git a/example/optcalc/calc.py b/example/optcalc/calc.py
deleted file mode 100644
index 0c223e5..0000000
--- a/example/optcalc/calc.py
+++ /dev/null
@@ -1,134 +0,0 @@
-# -----------------------------------------------------------------------------
-# calc.py
-#
-# A simple calculator with variables. This is from O'Reilly's
-# "Lex and Yacc", p. 63.
-# -----------------------------------------------------------------------------
-
-import sys
-sys.path.insert(0, "../..")
-
-if sys.version_info[0] >= 3:
- raw_input = input
-
-tokens = (
- 'NAME', 'NUMBER',
- 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'EQUALS',
- 'LPAREN', 'RPAREN',
-)
-
-# Tokens
-
-t_PLUS = r'\+'
-t_MINUS = r'-'
-t_TIMES = r'\*'
-t_DIVIDE = r'/'
-t_EQUALS = r'='
-t_LPAREN = r'\('
-t_RPAREN = r'\)'
-t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
-
-
-def t_NUMBER(t):
- r'\d+'
- try:
- t.value = int(t.value)
- except ValueError:
- print("Integer value too large %s" % t.value)
- t.value = 0
- return t
-
-t_ignore = " \t"
-
-
-def t_newline(t):
- r'\n+'
- t.lexer.lineno += t.value.count("\n")
-
-
-def t_error(t):
- print("Illegal character '%s'" % t.value[0])
- t.lexer.skip(1)
-
-# Build the lexer
-import ply.lex as lex
-lex.lex(optimize=1)
-
-# Parsing rules
-
-precedence = (
- ('left', 'PLUS', 'MINUS'),
- ('left', 'TIMES', 'DIVIDE'),
- ('right', 'UMINUS'),
-)
-
-# dictionary of names
-names = {}
-
-
-def p_statement_assign(t):
- 'statement : NAME EQUALS expression'
- names[t[1]] = t[3]
-
-
-def p_statement_expr(t):
- 'statement : expression'
- print(t[1])
-
-
-def p_expression_binop(t):
- '''expression : expression PLUS expression
- | expression MINUS expression
- | expression TIMES expression
- | expression DIVIDE expression'''
- if t[2] == '+':
- t[0] = t[1] + t[3]
- elif t[2] == '-':
- t[0] = t[1] - t[3]
- elif t[2] == '*':
- t[0] = t[1] * t[3]
- elif t[2] == '/':
- t[0] = t[1] / t[3]
- elif t[2] == '<':
- t[0] = t[1] < t[3]
-
-
-def p_expression_uminus(t):
- 'expression : MINUS expression %prec UMINUS'
- t[0] = -t[2]
-
-
-def p_expression_group(t):
- 'expression : LPAREN expression RPAREN'
- t[0] = t[2]
-
-
-def p_expression_number(t):
- 'expression : NUMBER'
- t[0] = t[1]
-
-
-def p_expression_name(t):
- 'expression : NAME'
- try:
- t[0] = names[t[1]]
- except LookupError:
- print("Undefined name '%s'" % t[1])
- t[0] = 0
-
-
-def p_error(t):
- if t:
- print("Syntax error at '%s'" % t.value)
- else:
- print("Syntax error at EOF")
-
-import ply.yacc as yacc
-yacc.yacc(optimize=1)
-
-while 1:
- try:
- s = raw_input('calc > ')
- except EOFError:
- break
- yacc.parse(s)
diff --git a/example/unicalc/calc.py b/example/unicalc/calc.py
deleted file mode 100644
index 901c4b9..0000000
--- a/example/unicalc/calc.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# -----------------------------------------------------------------------------
-# calc.py
-#
-# A simple calculator with variables. This is from O'Reilly's
-# "Lex and Yacc", p. 63.
-#
-# This example uses unicode strings for tokens, docstrings, and input.
-# -----------------------------------------------------------------------------
-
-import sys
-sys.path.insert(0, "../..")
-
-tokens = (
- 'NAME', 'NUMBER',
- 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'EQUALS',
- 'LPAREN', 'RPAREN',
-)
-
-# Tokens
-
-t_PLUS = ur'\+'
-t_MINUS = ur'-'
-t_TIMES = ur'\*'
-t_DIVIDE = ur'/'
-t_EQUALS = ur'='
-t_LPAREN = ur'\('
-t_RPAREN = ur'\)'
-t_NAME = ur'[a-zA-Z_][a-zA-Z0-9_]*'
-
-
-def t_NUMBER(t):
- ur'\d+'
- try:
- t.value = int(t.value)
- except ValueError:
- print "Integer value too large", t.value
- t.value = 0
- return t
-
-t_ignore = u" \t"
-
-
-def t_newline(t):
- ur'\n+'
- t.lexer.lineno += t.value.count("\n")
-
-
-def t_error(t):
- print "Illegal character '%s'" % t.value[0]
- t.lexer.skip(1)
-
-# Build the lexer
-import ply.lex as lex
-lex.lex()
-
-# Parsing rules
-
-precedence = (
- ('left', 'PLUS', 'MINUS'),
- ('left', 'TIMES', 'DIVIDE'),
- ('right', 'UMINUS'),
-)
-
-# dictionary of names
-names = {}
-
-
-def p_statement_assign(p):
- 'statement : NAME EQUALS expression'
- names[p[1]] = p[3]
-
-
-def p_statement_expr(p):
- 'statement : expression'
- print p[1]
-
-
-def p_expression_binop(p):
- '''expression : expression PLUS expression
- | expression MINUS expression
- | expression TIMES expression
- | expression DIVIDE expression'''
- if p[2] == u'+':
- p[0] = p[1] + p[3]
- elif p[2] == u'-':
- p[0] = p[1] - p[3]
- elif p[2] == u'*':
- p[0] = p[1] * p[3]
- elif p[2] == u'/':
- p[0] = p[1] / p[3]
-
-
-def p_expression_uminus(p):
- 'expression : MINUS expression %prec UMINUS'
- p[0] = -p[2]
-
-
-def p_expression_group(p):
- 'expression : LPAREN expression RPAREN'
- p[0] = p[2]
-
-
-def p_expression_number(p):
- 'expression : NUMBER'
- p[0] = p[1]
-
-
-def p_expression_name(p):
- 'expression : NAME'
- try:
- p[0] = names[p[1]]
- except LookupError:
- print "Undefined name '%s'" % p[1]
- p[0] = 0
-
-
-def p_error(p):
- if p:
- print "Syntax error at '%s'" % p.value
- else:
- print "Syntax error at EOF"
-
-import ply.yacc as yacc
-yacc.yacc()
-
-while 1:
- try:
- s = raw_input('calc > ')
- except EOFError:
- break
- if not s:
- continue
- yacc.parse(unicode(s))
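
unicalc is obsolete under Python 3: ur'...' literals are a syntax error there, and str is already unicode, so the same grammar needs no special treatment. The token specifications collapse to ordinary raw strings (an illustrative pair, following the deleted file):

    t_PLUS = r'\+'                       # was ur'\+'
    t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'   # was ur'[a-zA-Z_][a-zA-Z0-9_]*'
    yacc.parse(s)                        # str is unicode; no unicode(s) wrapper
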
diff --git a/example/yply/yparse.py b/example/yply/yparse.py
index 1f2e8d0..b2c8863 100644
--- a/example/yply/yparse.py
+++ b/example/yply/yparse.py
@@ -233,7 +233,7 @@ def p_empty(p):
def p_error(p):
pass
-yacc.yacc(debug=0)
+yacc.yacc(debug=False)
def print_code(code, indent):