summaryrefslogtreecommitdiff
path: root/scss/src/yapps/grammar.py
diff options
context:
space:
mode:
Diffstat (limited to 'scss/src/yapps/grammar.py')
-rw-r--r--scss/src/yapps/grammar.py262
1 file changed, 262 insertions, 0 deletions
diff --git a/scss/src/yapps/grammar.py b/scss/src/yapps/grammar.py
new file mode 100644
index 0000000..fa12e4c
--- /dev/null
+++ b/scss/src/yapps/grammar.py
@@ -0,0 +1,262 @@
+# grammar.py, part of Yapps 2 - yet another python parser system
+# Copyright 1999-2003 by Amit J. Patel <amitp@cs.stanford.edu>
+# Enhancements copyright 2003-2004 by Matthias Urlichs <smurf@debian.org>
+#
+# This version of the Yapps 2 grammar can be distributed under the
+# terms of the MIT open source license, either found in the LICENSE
+# file included with the Yapps distribution
+# <http://theory.stanford.edu/~amitp/yapps/> or at
+# <http://www.opensource.org/licenses/mit-license.php>
+#
+
+"""Parser for Yapps grammars.
+
+This file defines the grammar of Yapps grammars. Naturally, it is
+implemented in Yapps. The grammar.py module needed by Yapps is built
+by running Yapps on yapps_grammar.g. (Holy circularity, Batman!)
+
+"""
+
+try:
+ from yapps import parsetree
+except ImportError:
+ import parsetree
+
+
+######################################################################
def cleanup_choice(rule, lst):
    """Collapse a list of alternative clauses for *rule* into one node.

    An empty list becomes an empty Sequence, a single alternative is
    returned unchanged, and two or more alternatives are wrapped in a
    Choice node.
    """
    if not lst:
        return parsetree.Sequence(rule, [])
    if len(lst) == 1:
        return lst[0]
    return parsetree.Choice(rule, *lst)
+
+
def cleanup_sequence(rule, lst):
    """Collapse a list of sequential clauses for *rule* into one node.

    A single clause is returned unchanged; any other length (including
    the empty list) is wrapped in a Sequence node.
    """
    if len(lst) != 1:
        return parsetree.Sequence(rule, *lst)
    return lst[0]
+
+
def resolve_name(rule, tokens, id, args):
    """Resolve a bare identifier in a rule body to a parse-tree node.

    Arguments:
        rule   -- name of the rule being parsed (attached to the node).
        tokens -- list of (name, pattern, ...) token tuples declared so far.
        id     -- the identifier to resolve.  (Parameter shadows the
                  builtin `id`; the name is kept for interface
                  compatibility with existing callers.)
        args   -- optional <<...>> parameter text attached to the name,
                  or a false value when absent.

    If *id* matches a declared token name the result is a Terminal;
    parameters are meaningless on a token, so they are dropped with a
    warning.  Otherwise the name is assumed to refer to a nonterminal
    rule and a NonTerminal node (carrying *args*) is returned.
    """
    if id in [x[0] for x in tokens]:
        # It's a token
        if args:
            # Parenthesized single-argument print is valid in both
            # Python 2 and Python 3; the original bare `print` statement
            # is a SyntaxError under Python 3.
            print('Warning: ignoring parameters on TOKEN %s<<%s>>' % (id, args))
        return parsetree.Terminal(rule, id)
    else:
        # It's a name, so assume it's a nonterminal
        return parsetree.NonTerminal(rule, id, args)
+
+
+# Begin -- grammar generated by Yapps
+import re
+try:
+ from yapps.runtime import Scanner, Parser, Context, wrap_error_reporter
+except ImportError:
+ from runtime import Scanner, Parser, Context, wrap_error_reporter
+
+
class ParserDescriptionScanner(Scanner):
    """Tokenizer for Yapps grammar files.

    Wraps the runtime `Scanner` with the fixed token set of the Yapps
    grammar language itself.  Whitespace runs and `#` comments are
    matched but discarded (see the ignore dict passed to the base
    class).
    """

    # Compiled-pattern cache shared by all instances; filled in lazily
    # by __init__ on the fallback path below.
    patterns = None
    # (token name, regex source) pairs.  Quoted names like '"rule"' are
    # literal keywords; bare names (ATTR, STMT, ID, ...) are the
    # grammar's real token classes.
    _patterns = [
        ('"rule"', 'rule'),
        ('"ignore"', 'ignore'),
        ('"token"', 'token'),
        ('"option"', 'option'),
        ('":"', ':'),
        ('"parser"', 'parser'),
        ('[ \t\r\n]+', '[ \t\r\n]+'),
        ('#.*?\r?\n', '#.*?\r?\n'),
        ('EOF', '$'),
        ('ATTR', '<<.+?>>'),
        ('STMT', '{{.+?}}'),
        ('ID', '[a-zA-Z_][a-zA-Z_0-9]*'),
        ('STR', '[rR]?\'([^\\n\'\\\\]|\\\\.)*\'|[rR]?"([^\\n"\\\\]|\\\\.)*"'),
        ('LP', '\\('),
        ('RP', '\\)'),
        ('LB', '\\['),
        ('RB', '\\]'),
        ('OR', '[|]'),
        ('STAR', '[*]'),
        ('PLUS', '[+]'),
        ('QUEST', '[?]'),
        ('COLON', ':'),
    ]

    def __init__(self, str, *args, **kw):
        # `str` shadows the builtin; name kept for interface
        # compatibility.  It is the grammar text to tokenize.
        if hasattr(self, 'setup_patterns'):
            # Newer runtime Scanner: let it compile/install the patterns.
            self.setup_patterns(self._patterns)
        elif self.patterns is None:
            # Older runtime: compile once and cache on the class so
            # subsequent instances skip the re.compile() work.
            self.__class__.patterns = []
            for t, p in self._patterns:
                self.patterns.append((t, re.compile(p)))
        # First arg None: patterns are already installed above.  The
        # dict maps the whitespace and comment patterns to None,
        # marking them as ignored tokens.
        super(ParserDescriptionScanner, self).__init__(None, {'[ \t\r\n]+': None, '#.*?\r?\n': None, }, str, *args, **kw)
+
+
class ParserDescription(Parser):
    """Recursive-descent parser for Yapps grammar files.

    Generated by Yapps itself (see the Begin/End markers around this
    code).  Each method parses one grammar nonterminal; `self._scan`
    consumes one token and `self._peek` looks ahead against a set of
    acceptable token names (the `_*` class constants at the bottom).
    """

    Context = Context

    def Parser(self, _parent=None):
        """Top-level entry: parse a whole `parser NAME: ...` description.

        Returns a parsetree.Generator built from the parser name, its
        option dict, token list and rule list.
        """
        _context = self.Context(_parent, self._scanner, 'Parser', [])
        self._scan('"parser"', context=_context)
        ID = self._scan('ID', context=_context)
        self._scan('":"', context=_context)
        Options = self.Options(_context)
        Tokens = self.Tokens(_context)
        Rules = self.Rules(Tokens, _context)
        EOF = self._scan('EOF', context=_context)
        return parsetree.Generator(ID, Options, Tokens, Rules)

    def Options(self, _parent=None):
        """Parse zero or more `option: STR` lines; return {option: 1}."""
        _context = self.Context(_parent, self._scanner, 'Options', [])
        opt = {}
        while self._peek(self._option_ignore_rule_EOF_token, context=_context) == self._option:
            self._scan('"option"', context=_context)
            self._scan('":"', context=_context)
            Str = self.Str(_context)
            # Used as a set: presence of the key is what matters.
            opt[Str] = 1
        return opt

    def Tokens(self, _parent=None):
        """Parse `token NAME: STR` and `ignore: STR` declarations.

        Returns a list of tuples: (name, pattern) or, when a {{...}}
        statement is attached, (name, pattern, stmt).  Ignored patterns
        are recorded under the pseudo-name '#ignore'.
        """
        _context = self.Context(_parent, self._scanner, 'Tokens', [])
        tok = []
        while self._peek(self._ignore_rule_EOF_token, context=_context) in self._ignore_token:
            _token = self._peek(self._ignore_token, context=_context)
            if _token == self._token:
                self._scan('"token"', context=_context)
                ID = self._scan('ID', context=_context)
                self._scan('":"', context=_context)
                Str = self.Str(_context)
                tid = (ID, Str)
                if self._peek(self._ignore_rule_token_STMT_EOF, context=_context) == self._STMT:
                    STMT = self._scan('STMT', context=_context)
                    # Strip the {{ }} delimiters from the statement text.
                    tid += (STMT[2:-2],)
                tok.append(tid)
            else: # == self._ignore
                self._scan('"ignore"', context=_context)
                self._scan('":"', context=_context)
                Str = self.Str(_context)
                ign = ('#ignore', Str)
                if self._peek(self._ignore_rule_token_STMT_EOF, context=_context) == self._STMT:
                    STMT = self._scan('STMT', context=_context)
                    ign += (STMT[2:-2],)
                tok.append(ign)
        return tok

    def Rules(self, tokens, _parent=None):
        """Parse `rule NAME<<params>>: clause` definitions.

        Returns a list of (name, param-string, clause-tree) triples.
        *tokens* is threaded through so clause parsing can distinguish
        token references from nonterminal references.
        """
        _context = self.Context(_parent, self._scanner, 'Rules', [tokens])
        rul = []
        while self._peek(self._rule_EOF, context=_context) == self._rule:
            self._scan('"rule"', context=_context)
            ID = self._scan('ID', context=_context)
            OptParam = self.OptParam(_context)
            self._scan('":"', context=_context)
            ClauseA = self.ClauseA(ID, tokens, _context)
            rul.append((ID, OptParam, ClauseA))
        return rul

    def ClauseA(self, rule, tokens, _parent=None):
        """Lowest-precedence level: `|`-separated alternatives."""
        _context = self.Context(_parent, self._scanner, 'ClauseA', [rule, tokens])
        ClauseB = self.ClauseB(rule, tokens, _context)
        v = [ClauseB]
        while self._peek(self._rule_RP_OR_RB_EOF, context=_context) == self._OR:
            OR = self._scan('OR', context=_context)
            ClauseB = self.ClauseB(rule, tokens, _context)
            v.append(ClauseB)
        return cleanup_choice(rule, v)

    def ClauseB(self, rule, tokens, _parent=None):
        """Middle level: juxtaposition, i.e. a sequence of ClauseC terms."""
        _context = self.Context(_parent, self._scanner, 'ClauseB', [rule, tokens])
        v = []
        while self._peek(self._LB_LP_rule_RB_STMT_ID_STR_RP__, context=_context) in self._STMT_LB_ID_STR_LP:
            ClauseC = self.ClauseC(rule, tokens, _context)
            v.append(ClauseC)
        return cleanup_sequence(rule, v)

    def ClauseC(self, rule, tokens, _parent=None):
        """Postfix level: an atom optionally followed by +, * or ?."""
        _context = self.Context(_parent, self._scanner, 'ClauseC', [rule, tokens])
        ClauseD = self.ClauseD(rule, tokens, _context)
        _token = self._peek(self._RB_STAR_LP_rule_RP_STMT_OR_QU_, context=_context)
        if _token == self._PLUS:
            PLUS = self._scan('PLUS', context=_context)
            return parsetree.Plus(rule, ClauseD)
        elif _token == self._STAR:
            STAR = self._scan('STAR', context=_context)
            return parsetree.Star(rule, ClauseD)
        elif _token == self._QUEST:
            QUEST = self._scan('QUEST', context=_context)
            return parsetree.Option(rule, ClauseD)
        else: # in self._LB_LP_rule_RB_STMT_ID_STR_RP__
            return ClauseD

    def ClauseD(self, rule, tokens, _parent=None):
        """Atom level: string literal, name, (group), [option] or {{stmt}}."""
        _context = self.Context(_parent, self._scanner, 'ClauseD', [rule, tokens])
        _token = self._peek(self._STMT_LB_ID_STR_LP, context=_context)
        if _token == self._STR:
            STR = self._scan('STR', context=_context)
            # Inline literal: register it as an implicit token (name is
            # the quoted source text, pattern its evaluated value).
            # eval() here undoes the string-literal quoting; STR comes
            # from the grammar file being processed, which is trusted.
            t = (STR, eval(STR, {}, {}))
            if t not in tokens:
                tokens.insert(0, t)
            return parsetree.Terminal(rule, STR)
        elif _token == self._ID:
            ID = self._scan('ID', context=_context)
            OptParam = self.OptParam(_context)
            # Terminal vs NonTerminal is decided by resolve_name.
            return resolve_name(rule, tokens, ID, OptParam)
        elif _token == self._LP:
            LP = self._scan('LP', context=_context)
            ClauseA = self.ClauseA(rule, tokens, _context)
            RP = self._scan('RP', context=_context)
            return ClauseA
        elif _token == self._LB:
            LB = self._scan('LB', context=_context)
            ClauseA = self.ClauseA(rule, tokens, _context)
            RB = self._scan('RB', context=_context)
            # [ ... ] means the bracketed clause is optional.
            return parsetree.Option(rule, ClauseA)
        else: # == self._STMT
            STMT = self._scan('STMT', context=_context)
            # Strip the {{ }} delimiters; the inside is embedded Python.
            return parsetree.Eval(rule, STMT[2:-2])

    def OptParam(self, _parent=None):
        """Parse an optional <<...>> attribute; return its inside or ''."""
        _context = self.Context(_parent, self._scanner, 'OptParam', [])
        if self._peek(self._RB_STAR_ATTR_LP_rule_RP_STMT__, context=_context) == self._ATTR:
            ATTR = self._scan('ATTR', context=_context)
            return ATTR[2:-2]
        return ''

    def Str(self, _parent=None):
        """Parse a string literal token and return its evaluated value."""
        _context = self.Context(_parent, self._scanner, 'Str', [])
        STR = self._scan('STR', context=_context)
        # eval() undoes the quoting/escapes of the literal; grammar
        # files are trusted input.
        return eval(STR, {}, {})

    # Follow/first sets and single-token names used by _peek above.
    # Generated alongside the methods; the mangled names encode the
    # (truncated) member list.
    _RB_STAR_ATTR_LP_rule_RP_STMT__ = set(['RB', 'STAR', 'ATTR', 'LP', '"rule"', 'RP', 'STMT', 'OR', 'QUEST', 'PLUS', '":"', 'STR', 'LB', 'ID', 'EOF'])
    _STR = 'STR'
    _RB_STAR_LP_rule_RP_STMT_OR_QU_ = set(['RB', 'STAR', 'LP', '"rule"', 'RP', 'STMT', 'OR', 'QUEST', 'PLUS', 'STR', 'LB', 'ID', 'EOF'])
    _LP = 'LP'
    _STAR = 'STAR'
    _ignore_rule_EOF_token = set(['"ignore"', '"rule"', 'EOF', '"token"'])
    _ATTR = 'ATTR'
    _rule = '"rule"'
    _LB = 'LB'
    _option = '"option"'
    _STMT = 'STMT'
    _token = '"token"'
    _rule_RP_OR_RB_EOF = set(['"rule"', 'RP', 'OR', 'RB', 'EOF'])
    _option_ignore_rule_EOF_token = set(['"option"', '"ignore"', '"rule"', 'EOF', '"token"'])
    _rule_EOF = set(['"rule"', 'EOF'])
    _ignore = '"ignore"'
    _QUEST = 'QUEST'
    _OR = 'OR'
    _PLUS = 'PLUS'
    _STMT_LB_ID_STR_LP = set(['STMT', 'LB', 'ID', 'STR', 'LP'])
    _ignore_rule_token_STMT_EOF = set(['"ignore"', '"rule"', '"token"', 'STMT', 'EOF'])
    _LB_LP_rule_RB_STMT_ID_STR_RP__ = set(['LB', 'LP', '"rule"', 'RB', 'STMT', 'ID', 'STR', 'RP', 'OR', 'EOF'])
    _ID = 'ID'
    _ignore_token = set(['"ignore"', '"token"'])
+
+
def parse(rule, text):
    """Parse the grammar *text* starting from nonterminal *rule*.

    Builds a ParserDescription over a fresh scanner and runs it through
    the runtime's wrap_error_reporter, which returns the parse result
    or reports a syntax error.
    """
    parser = ParserDescription(ParserDescriptionScanner(text))
    return wrap_error_reporter(parser, rule)
+
+# End -- grammar generated by Yapps