author     German M. Bravo <german.mb@deipi.com>  2013-08-15 13:18:37 -0500
committer  German M. Bravo <german.mb@deipi.com>  2013-08-16 10:16:22 -0500
commit     0416b034d8e80e134b307515ce2f755abcc691eb (patch)
tree       295af0c9e48860ef8bd37f4b2c0c1b26c1c2cfdc
parent     64d6450afbb0822171935b8d6b9971cea58a6fba (diff)
Yapps updated
-rw-r--r--  scss/_native.py                                                   27
-rw-r--r--  scss/src/grammar/scss.py                                         256
-rwxr-xr-x  scss/src/grammar/yapps2.py                                       901
-rw-r--r--  scss/src/grammar/yappsrt.py                                      275
-rw-r--r--  scss/src/yapps/LICENSE (renamed from scss/src/grammar/LICENSE)     0
-rw-r--r--  scss/src/yapps/README (renamed from scss/src/grammar/README)       0
-rw-r--r--  scss/src/yapps/__init__.py                                         0
-rwxr-xr-x  scss/src/yapps/cli_tool.py                                       119
-rw-r--r--  scss/src/yapps/grammar.py                                        262
-rw-r--r--  scss/src/yapps/parsetree.py                                      774
-rw-r--r--  scss/src/yapps/runtime.py                                        443
-rw-r--r--  scss/src/yapps/scss.g (renamed from scss/src/grammar/scss.g)       0
-rw-r--r--  scss/src/yapps/scss.py                                           299
-rw-r--r--  scss/src/yapps/yapps_grammar.g                                   133
14 files changed, 2045 insertions, 1444 deletions
diff --git a/scss/_native.py b/scss/_native.py
index 7abeb5b..a47a8d4 100644
--- a/scss/_native.py
+++ b/scss/_native.py
@@ -216,23 +216,26 @@ class Scanner(object):
msg = "%s found while trying to find one of the restricted tokens: %s" % ("???" if tok is None else repr(tok), ", ".join(repr(r) for r in restrict))
raise SyntaxError("SyntaxError[@ char %s: %s]" % (repr(self.pos), msg), context=context)
- # If we found something that isn't to be ignored, return it
- if best_pat in self.ignore:
- # This token should be ignored...
- self.pos += best_match
- else:
- end_pos = self.pos + best_match
- # Create a token with this data
+ ignore = best_pat in self.ignore
+ end_pos = self.pos + best_match
+ value = self.input[self.pos:end_pos]
+ if not ignore:
+ # token = Token(type=best_pat, value=value, pos=self.get_pos())
token = (
self.pos,
end_pos,
best_pat,
- self.input[self.pos:end_pos]
+ value,
)
- self.pos = end_pos
- # Only add this token if it's not in the list
- # (to prevent looping)
- if not self.tokens or token != self.tokens[-1]:
+ self.pos = end_pos
+
+ # If we found something that isn't to be ignored, return it
+ if not ignore:
+ # print repr(token)
+ if not self.tokens or token != self.last_read_token:
+ # Only add this token if it's not in the list
+ # (to prevent looping)
+ self.last_read_token = token
self.tokens.append(token)
self.restrictions.append(restrict)
return 1
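
The hunk above reworks Scanner._scan so that the matched text is sliced once into value, self.pos now advances past ignored tokens as well, and the duplicate check compares against a remembered last_read_token instead of self.tokens[-1]. A minimal sketch of that dedup pattern, using a simplified stand-in class rather than pyScss's actual Scanner:

    class MiniScanner(object):
        """Illustrative only: the emit/dedup shape used by the new _scan."""
        def __init__(self, text):
            self.text = text
            self.pos = 0
            self.tokens = []
            self.last_read_token = None   # assumed to be cleared on reset

        def emit(self, start, end, kind):
            value = self.text[start:end]          # slice once, reuse below
            token = (start, end, kind, value)
            self.pos = end                        # always advance
            if token != self.last_read_token:     # prevents re-append loops
                self.last_read_token = token
                self.tokens.append(token)
            return token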
diff --git a/scss/src/grammar/scss.py b/scss/src/grammar/scss.py
deleted file mode 100644
index 4aa27ee..0000000
--- a/scss/src/grammar/scss.py
+++ /dev/null
@@ -1,256 +0,0 @@
-# python yapps2.py grammar.g grammar.py
-
-################################################################################
-## Grammar compiled using Yapps:
-
-import re
-from string import *
-from yappsrt import *
-
-
-class SassExpressionScanner(Scanner):
- patterns = None
- _patterns = [
- ('":"', ':'),
- ('[ \r\t\n]+', '[ \r\t\n]+'),
- ('COMMA', ','),
- ('LPAR', '\\(|\\['),
- ('RPAR', '\\)|\\]'),
- ('END', '$'),
- ('MUL', '[*]'),
- ('DIV', '/'),
- ('ADD', '[+]'),
- ('SUB', '-\\s'),
- ('SIGN', '-(?![a-zA-Z_])'),
- ('AND', '(?<![-\\w])and(?![-\\w])'),
- ('OR', '(?<![-\\w])or(?![-\\w])'),
- ('NOT', '(?<![-\\w])not(?![-\\w])'),
- ('NE', '!='),
- ('INV', '!'),
- ('EQ', '=='),
- ('LE', '<='),
- ('GE', '>='),
- ('LT', '<'),
- ('GT', '>'),
- ('STR', "'[^']*'"),
- ('QSTR', '"[^"]*"'),
- ('UNITS', '(?<!\\s)(?:[a-zA-Z]+|%)(?![-\\w])'),
- ('NUM', '(?:\\d+(?:\\.\\d*)?|\\.\\d+)'),
- ('COLOR', '#(?:[a-fA-F0-9]{6}|[a-fA-F0-9]{3})(?![a-fA-F0-9])'),
- ('VAR', '\\$[-a-zA-Z0-9_]+'),
- ('NAME', '\\$?[-a-zA-Z0-9_]+'),
- ('FNCT', '[-a-zA-Z_][-a-zA-Z0-9_]*(?=\\()'),
- ('ID', '!?[-a-zA-Z_][-a-zA-Z0-9_]*'),
- ('BANG_IMPORTANT', '!important'),
- ]
-
- def __init__(self, input=None):
- if hasattr(self, 'setup_patterns'):
- self.setup_patterns(self._patterns)
- elif self.patterns is None:
- self.__class__.patterns = []
- for t, p in self._patterns:
- self.patterns.append((t, re.compile(p)))
- super(SassExpressionScanner, self).__init__(None, ['[ \r\t\n]+'], input)
-
-
-class SassExpression(Parser):
- def goal(self):
- expr_lst = self.expr_lst()
- END = self._scan('END')
- return expr_lst
-
- def expr_lst(self):
- expr_item = self.expr_item()
- v = [expr_item]
- while self._peek(self.expr_lst_rsts) == 'COMMA':
- COMMA = self._scan('COMMA')
- expr_item = (None, Literal(Undefined()))
- if self._peek(self.expr_lst_rsts_) not in self.expr_lst_rsts:
- expr_item = self.expr_item()
- v.append(expr_item)
- return ListLiteral(v) if len(v) > 1 else v[0][1]
-
- def expr_item(self):
- NAME = None
- if self._peek(self.expr_item_rsts) == 'NAME':
- NAME = self._scan('NAME')
- self._scan('":"')
- expr_slst = self.expr_slst()
- return (NAME, expr_slst)
-
- def expr_slst(self):
- or_expr = self.or_expr()
- v = [(None, or_expr)]
- while self._peek(self.expr_slst_rsts) not in self.expr_lst_rsts:
- or_expr = self.or_expr()
- v.append((None, or_expr))
- return ListLiteral(v, comma=False) if len(v) > 1 else v[0][1]
-
- def or_expr(self):
- and_expr = self.and_expr()
- v = and_expr
- while self._peek(self.or_expr_rsts) == 'OR':
- OR = self._scan('OR')
- and_expr = self.and_expr()
- v = AnyOp(v, and_expr)
- return v
-
- def and_expr(self):
- not_expr = self.not_expr()
- v = not_expr
- while self._peek(self.and_expr_rsts) == 'AND':
- AND = self._scan('AND')
- not_expr = self.not_expr()
- v = AllOp(v, not_expr)
- return v
-
- def not_expr(self):
- _token_ = self._peek(self.not_expr_rsts)
- if _token_ != 'NOT':
- comparison = self.comparison()
- return comparison
- else: # == 'NOT'
- NOT = self._scan('NOT')
- not_expr = self.not_expr()
- return NotOp(not_expr)
-
- def comparison(self):
- a_expr = self.a_expr()
- v = a_expr
- while self._peek(self.comparison_rsts) in self.comparison_chks:
- _token_ = self._peek(self.comparison_chks)
- if _token_ == 'LT':
- LT = self._scan('LT')
- a_expr = self.a_expr()
- v = BinaryOp(operator.lt, v, a_expr)
- elif _token_ == 'GT':
- GT = self._scan('GT')
- a_expr = self.a_expr()
- v = BinaryOp(operator.gt, v, a_expr)
- elif _token_ == 'LE':
- LE = self._scan('LE')
- a_expr = self.a_expr()
- v = BinaryOp(operator.le, v, a_expr)
- elif _token_ == 'GE':
- GE = self._scan('GE')
- a_expr = self.a_expr()
- v = BinaryOp(operator.ge, v, a_expr)
- elif _token_ == 'EQ':
- EQ = self._scan('EQ')
- a_expr = self.a_expr()
- v = BinaryOp(operator.eq, v, a_expr)
- else: # == 'NE'
- NE = self._scan('NE')
- a_expr = self.a_expr()
- v = BinaryOp(operator.ne, v, a_expr)
- return v
-
- def a_expr(self):
- m_expr = self.m_expr()
- v = m_expr
- while self._peek(self.a_expr_rsts) in self.a_expr_chks:
- _token_ = self._peek(self.a_expr_chks)
- if _token_ == 'ADD':
- ADD = self._scan('ADD')
- m_expr = self.m_expr()
- v = BinaryOp(operator.add, v, m_expr)
- else: # == 'SUB'
- SUB = self._scan('SUB')
- m_expr = self.m_expr()
- v = BinaryOp(operator.sub, v, m_expr)
- return v
-
- def m_expr(self):
- u_expr = self.u_expr()
- v = u_expr
- while self._peek(self.m_expr_rsts) in self.m_expr_chks:
- _token_ = self._peek(self.m_expr_chks)
- if _token_ == 'MUL':
- MUL = self._scan('MUL')
- u_expr = self.u_expr()
- v = BinaryOp(operator.mul, v, u_expr)
- else: # == 'DIV'
- DIV = self._scan('DIV')
- u_expr = self.u_expr()
- v = BinaryOp(operator.truediv, v, u_expr)
- return v
-
- def u_expr(self):
- _token_ = self._peek(self.u_expr_rsts)
- if _token_ == 'SIGN':
- SIGN = self._scan('SIGN')
- u_expr = self.u_expr()
- return UnaryOp(operator.neg, u_expr)
- elif _token_ == 'ADD':
- ADD = self._scan('ADD')
- u_expr = self.u_expr()
- return UnaryOp(operator.pos, u_expr)
- else: # in self.u_expr_chks
- atom = self.atom()
- return atom
-
- def atom(self):
- _token_ = self._peek(self.u_expr_chks)
- if _token_ == 'ID':
- ID = self._scan('ID')
- return Literal(parse_bareword(ID))
- elif _token_ == 'BANG_IMPORTANT':
- BANG_IMPORTANT = self._scan('BANG_IMPORTANT')
- return Literal(String(BANG_IMPORTANT, quotes=None))
- elif _token_ == 'LPAR':
- LPAR = self._scan('LPAR')
- expr_lst = ListLiteral()
- if self._peek(self.atom_rsts) not in self.atom_chks:
- expr_lst = self.expr_lst()
- RPAR = self._scan('RPAR')
- return Parentheses(expr_lst)
- elif _token_ == 'FNCT':
- FNCT = self._scan('FNCT')
- LPAR = self._scan('LPAR')
- expr_lst = ListLiteral()
- if self._peek(self.atom_rsts) not in self.atom_chks:
- expr_lst = self.expr_lst()
- RPAR = self._scan('RPAR')
- return CallOp(FNCT, expr_lst)
- elif _token_ == 'NUM':
- NUM = self._scan('NUM')
- UNITS = None
- if self._peek(self.atom_rsts_) == 'UNITS':
- UNITS = self._scan('UNITS')
- return Literal(NumberValue(float(NUM), unit=UNITS))
- elif _token_ == 'STR':
- STR = self._scan('STR')
- return Literal(String(STR[1:-1], quotes="'"))
- elif _token_ == 'QSTR':
- QSTR = self._scan('QSTR')
- return Literal(String(QSTR[1:-1], quotes='"'))
- elif _token_ == 'COLOR':
- COLOR = self._scan('COLOR')
- return Literal(ColorValue(ParserValue(COLOR)))
- else: # == 'VAR'
- VAR = self._scan('VAR')
- return Variable(VAR)
-
- m_expr_chks = set(['MUL', 'DIV'])
- comparison_rsts = set(['LPAR', 'QSTR', 'RPAR', 'BANG_IMPORTANT', 'LE', 'COLOR', 'NE', 'LT', 'NUM', 'COMMA', 'GT', 'END', 'SIGN', 'ADD', 'FNCT', 'STR', 'VAR', 'EQ', 'ID', 'AND', 'GE', 'NOT', 'OR'])
- atom_rsts = set(['LPAR', 'BANG_IMPORTANT', 'END', 'NAME', 'COLOR', 'QSTR', 'SIGN', 'VAR', 'ADD', 'NUM', 'FNCT', 'STR', 'NOT', 'RPAR', 'ID'])
- u_expr_chks = set(['LPAR', 'COLOR', 'QSTR', 'NUM', 'FNCT', 'STR', 'VAR', 'BANG_IMPORTANT', 'ID'])
- m_expr_rsts = set(['LPAR', 'SUB', 'QSTR', 'RPAR', 'MUL', 'DIV', 'BANG_IMPORTANT', 'LE', 'COLOR', 'NE', 'LT', 'NUM', 'COMMA', 'GT', 'END', 'SIGN', 'GE', 'FNCT', 'STR', 'VAR', 'EQ', 'ID', 'AND', 'ADD', 'NOT', 'OR'])
- expr_lst_rsts_ = set(['LPAR', 'BANG_IMPORTANT', 'END', 'NAME', 'COLOR', 'QSTR', 'SIGN', 'VAR', 'ADD', 'NUM', 'COMMA', 'FNCT', 'STR', 'NOT', 'RPAR', 'ID'])
- a_expr_rsts = set(['LPAR', 'SUB', 'QSTR', 'RPAR', 'BANG_IMPORTANT', 'LE', 'COLOR', 'NE', 'LT', 'NUM', 'COMMA', 'GT', 'END', 'SIGN', 'GE', 'FNCT', 'STR', 'VAR', 'EQ', 'ID', 'AND', 'ADD', 'NOT', 'OR'])
- or_expr_rsts = set(['LPAR', 'RPAR', 'BANG_IMPORTANT', 'END', 'COLOR', 'QSTR', 'ID', 'VAR', 'ADD', 'NUM', 'COMMA', 'FNCT', 'STR', 'NOT', 'SIGN', 'OR'])
- u_expr_rsts = set(['LPAR', 'COLOR', 'QSTR', 'SIGN', 'ADD', 'NUM', 'FNCT', 'STR', 'VAR', 'BANG_IMPORTANT', 'ID'])
- expr_lst_rsts = set(['END', 'COMMA', 'RPAR'])
- expr_item_rsts = set(['LPAR', 'NAME', 'COLOR', 'QSTR', 'SIGN', 'VAR', 'ADD', 'NUM', 'FNCT', 'STR', 'NOT', 'BANG_IMPORTANT', 'ID'])
- not_expr_rsts = set(['LPAR', 'COLOR', 'QSTR', 'SIGN', 'VAR', 'ADD', 'NUM', 'FNCT', 'STR', 'NOT', 'BANG_IMPORTANT', 'ID'])
- atom_rsts_ = set(['LPAR', 'SUB', 'QSTR', 'RPAR', 'VAR', 'MUL', 'DIV', 'BANG_IMPORTANT', 'LE', 'COLOR', 'NE', 'LT', 'NUM', 'COMMA', 'GT', 'END', 'SIGN', 'GE', 'FNCT', 'STR', 'UNITS', 'EQ', 'ID', 'AND', 'ADD', 'NOT', 'OR'])
- atom_chks = set(['END', 'RPAR'])
- comparison_chks = set(['GT', 'GE', 'NE', 'LT', 'LE', 'EQ'])
- a_expr_chks = set(['ADD', 'SUB'])
- and_expr_rsts = set(['AND', 'LPAR', 'RPAR', 'END', 'COLOR', 'QSTR', 'SIGN', 'VAR', 'ADD', 'NUM', 'COMMA', 'FNCT', 'STR', 'NOT', 'ID', 'BANG_IMPORTANT', 'OR'])
- expr_slst_rsts = set(['LPAR', 'RPAR', 'END', 'COLOR', 'QSTR', 'SIGN', 'VAR', 'ADD', 'NUM', 'COMMA', 'FNCT', 'STR', 'NOT', 'BANG_IMPORTANT', 'ID'])
-
-
-### Grammar ends.
-################################################################################
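
Everything deleted above is mechanical Yapps output, regenerated below under scss/src/yapps/. The generated *_expr methods all share one shape: parse the tighter-binding level, then left-fold while the lookahead is one of this level's operators. A hand-written sketch of that loop (the helper names are invented, not part of the commit):

    import operator

    def parse_additive(parse_term, peek, scan):
        """Same loop as the generated a_expr: a left-associative ADD/SUB
        fold. parse_term/peek/scan stand in for the parser's methods."""
        ops = {'ADD': operator.add, 'SUB': operator.sub}
        value = parse_term()
        while peek() in ops:
            op = ops[scan()]                      # consume operator token
            value = ('binop', op, value, parse_term())
        return value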
diff --git a/scss/src/grammar/yapps2.py b/scss/src/grammar/yapps2.py
deleted file mode 100755
index 6ccb045..0000000
--- a/scss/src/grammar/yapps2.py
+++ /dev/null
@@ -1,901 +0,0 @@
-#!/usr/bin/env python
-
-# Yapps 3.0 - yet another python parser system
-# Amit J Patel, January 1999
-# German M. Bravo, December 2011
-# See http://theory.stanford.edu/~amitp/Yapps/ for documentation and updates
-
-# v3.0.0 changes (December 2011)
-# * PEP 8 cleanups
-# * Optimizations in the scanning (added cache and cleanup() for it)
-# v2.0.1 changes (October 2001):
-# * The exceptions inherit the standard Exception class (thanks Rich Salz)
-# * The scanner can use either a different set of regular expressions
-# per instance, or allows the subclass to define class fields with
-# the patterns. This improves performance when many Scanner objects
-# are being created, because the regular expressions don't have to
-# be recompiled each time. (thanks Amaury Forgeot d'Arc)
-# v2.0.2 changes (April 2002)
-# * Bug fix: generating the 'else' clause when the comment was too
-# long. v2.0.1 was missing a newline. (thanks Steven Engelhardt)
-# v2.0.3 changes (August 2002)
-# * Bug fix: inline tokens using the r"" syntax.
-# v.2.0.4 changes (July 2003)
-# * Style change: Replaced `expr` with repr(expr)
-# * Style change: Changed (b >= a and b < c) into (a <= b < c)
-# * Bug fix: identifiers in grammar rules that had digits in them were
-# not accessible in the {{python code}} section
-# * Bug fix: made the SyntaxError exception class call
-# Exception.__init__ (thanks Alex Verstak)
-# * Style change: replaced raise "string exception" with raise
-# ClassException(...) (thanks Alex Verstak)
-
-from yappsrt import *
-import sys
-import re
-
-
-INDENT = " " * 4
-
-
-class Generator:
- def __init__(self, name, options, tokens, rules):
- self.change_count = 0
- self.name = name
- self.options = options
- self.preparser = ''
- self.postparser = None
-
- self.tokens = {} # Map from tokens to regexps
- self.sets = {} # Map for restriction sets
- self.ignore = [] # List of token names to ignore in parsing
- self.terminals = [] # List of token names (to maintain ordering)
-
- for n, t in tokens:
- if n == '#ignore':
- n = t
- self.ignore.append(n)
- if n in self.tokens.keys() and self.tokens[n] != t:
- if n not in self.ignore:
- print 'Warning: token', n, 'multiply defined.'
- else:
- self.terminals.append(n)
- self.tokens[n] = t
-
- self.rules = {} # Map from rule names to parser nodes
- self.params = {} # Map from rule names to parameters
- self.goals = [] # List of rule names (to maintain ordering)
- for n, p, r in rules:
- self.params[n] = p
- self.rules[n] = r
- self.goals.append(n)
-
- self.output = sys.stdout
-
- def __getitem__(self, name):
- # Get options
- return self.options.get(name, 0)
-
- def non_ignored_tokens(self):
- return filter(lambda x, i=self.ignore: x not in i, self.terminals)
-
- def changed(self):
- self.change_count = 1 + self.change_count
-
- def subset(self, a, b):
- "See if all elements of a are inside b"
- for x in a:
- if x not in b:
- return 0
- return 1
-
- def equal_set(self, a, b):
- "See if a and b have the same elements"
- if len(a) != len(b):
- return 0
- if a == b:
- return 1
- return self.subset(a, b) and self.subset(b, a)
-
- def add_to(self, parent, additions):
- "Modify parent to include all elements in additions"
- for x in additions:
- if x not in parent:
- parent.append(x)
- self.changed()
-
- def equate(self, a, b):
- self.add_to(a, b)
- self.add_to(b, a)
-
- def write(self, *args):
- for a in args:
- self.output.write(a)
-
- def in_test(self, r, x, full, b):
- if not b:
- return '0'
- if len(b) == 1:
- return '%s == %s' % (x, repr(b[0]))
- if full and len(b) > len(full) / 2:
- # Reverse the sense of the test.
- not_b = filter(lambda x, b=b:
- x not in b, full)
- return self.not_in_test(r, x, full, not_b)
- n = None
- for k, v in self.sets.items():
- if v == b:
- n = k
- if n is None:
- n = '%s_chks' % r
- while n in self.sets:
- n += '_'
- self.sets[n] = b
- b_set = 'self.%s' % n
- return '%s in %s' % (x, b_set)
-
- def not_in_test(self, r, x, full, b):
- if not b:
- return '1'
- if len(b) == 1:
- return '%s != %s' % (x, repr(b[0]))
- n = None
- for k, v in self.sets.items():
- if v == b:
- n = k
- if n is None:
- n = '%s_chks' % r
- while n in self.sets:
- n += '_'
- self.sets[n] = b
- b_set = 'self.%s' % n
- return '%s not in %s' % (x, b_set)
-
- def peek_call(self, r, a):
- n = None
- for k, v in self.sets.items():
- if v == a:
- n = k
- if n is None:
- n = '%s_rsts' % r
- while n in self.sets:
- n += '_'
- self.sets[n] = a
- a_set = 'self.%s' % n
- if self.equal_set(a, self.non_ignored_tokens()):
- a_set = ''
- if self['context-insensitive-scanner']:
- a_set = ''
- return 'self._peek(%s)' % a_set
-
- def peek_test(self, r, a, b):
- if self.subset(a, b):
- return '1'
- if self['context-insensitive-scanner']:
- a = self.non_ignored_tokens()
- return self.in_test(r, self.peek_call(r, a), a, b)
-
- def not_peek_test(self, r, a, b):
- if self.subset(a, b):
- return '0'
- return self.not_in_test(r, self.peek_call(r, a), a, b)
-
- def calculate(self):
- while 1:
- for r in self.goals:
- self.rules[r].setup(self, r)
- if self.change_count == 0:
- break
- self.change_count = 0
-
- while 1:
- for r in self.goals:
- self.rules[r].update(self)
- if self.change_count == 0:
- break
- self.change_count = 0
-
- def dump_information(self):
- self.calculate()
- for r in self.goals:
- print ' _____' + '_' * len(r)
- print ('___/Rule ' + r + '\\' + '_' * 80)[:79]
- queue = [self.rules[r]]
- while queue:
- top = queue[0]
- del queue[0]
-
- print repr(top)
- top.first.sort()
- top.follow.sort()
- eps = []
- if top.accepts_epsilon:
- eps = ['(null)']
- print ' FIRST:', join(top.first + eps, ', ')
- print ' FOLLOW:', join(top.follow, ', ')
- for x in top.get_children():
- queue.append(x)
-
- def generate_output(self):
-
- self.calculate()
- self.write(self.preparser)
- # TODO: remove "import *" construct
- self.write("import re\n")
- self.write("from string import *\n")
- self.write("from yappsrt import *\n")
- self.write("\n\n")
- self.write("class ", self.name, "Scanner(Scanner):\n")
- self.write(INDENT, "patterns = None\n")
- self.write(INDENT, "_patterns = [\n")
- for p in self.terminals:
- self.write(INDENT*2, "(%s, %s),\n" % (
- repr(p), repr(self.tokens[p])))
- self.write(INDENT, "]\n\n")
- self.write(INDENT, "def __init__(self, input=None):\n")
- self.write(INDENT*2, "if hasattr(self, 'setup_patterns'):\n")
- self.write(INDENT*3, "self.setup_patterns(self._patterns)\n")
- self.write(INDENT*2, "elif self.patterns is None:\n")
- self.write(INDENT*3, "self.__class__.patterns = []\n")
- self.write(INDENT*3, "for t, p in self._patterns:\n")
- self.write(INDENT*4, "self.patterns.append((t, re.compile(p)))\n")
- self.write(INDENT*2, "super(", self.name, "Scanner, self).__init__(None, %s, input)\n" %
- repr(self.ignore))
- self.write("\n\n")
-
- self.write("class ", self.name, "(Parser):\n")
- for r in self.goals:
- self.write(INDENT, "def ", r, "(self")
- if self.params[r]:
- self.write(", ", self.params[r])
- self.write("):\n")
- self.rules[r].output(self, INDENT + INDENT)
- self.write("\n")
-
- for n, s in self.sets.items():
- self.write(" %s = %s\n" % (n, set(s)))
-
- if self.postparser is not None:
- self.write(self.postparser)
- else:
- self.write("\n")
- self.write("P = ", self.name, "(", self.name, "Scanner())\n")
- self.write("def parse(rule, text, *args):\n")
- self.write(" P.reset(text)\n")
- self.write(" return wrap_error_reporter(P, rule, *args)\n")
- self.write("\n")
-
- self.write("if __name__ == '__main__':\n")
- self.write(INDENT, "from sys import argv, stdin\n")
- self.write(INDENT, "if len(argv) >= 2:\n")
- self.write(INDENT * 2, "if len(argv) >= 3:\n")
- self.write(INDENT * 3, "f = open(argv[2],'r')\n")
- self.write(INDENT * 2, "else:\n")
- self.write(INDENT * 3, "f = stdin\n")
- self.write(INDENT * 2, "print parse(argv[1], f.read())\n")
- self.write(INDENT, "else: print 'Args: <rule> [<filename>]'\n")
-
-
-######################################################################
-
-
-class Node:
- def __init__(self):
- self.first = []
- self.follow = []
- self.accepts_epsilon = 0
- self.rule = '?'
-
- def setup(self, gen, rule):
- # Setup will change accepts_epsilon,
- # sometimes from 0 to 1 but never 1 to 0.
- # It will take a finite number of steps to set things up
- self.rule = rule
-
- def used(self, vars):
- "Return two lists: one of vars used, and the other of vars assigned"
- return vars, []
-
- def get_children(self):
- "Return a list of sub-nodes"
- return []
-
- def __repr__(self):
- return str(self)
-
- def update(self, gen):
- if self.accepts_epsilon:
- gen.add_to(self.first, self.follow)
-
- def output(self, gen, indent):
- "Write out code to _gen_ with _indent_:string indentation"
- gen.write(indent, "assert 0 # Invalid parser node\n")
-
-
-class Terminal(Node):
- def __init__(self, token):
- Node.__init__(self)
- self.token = token
- self.accepts_epsilon = 0
-
- def __str__(self):
- return self.token
-
- def update(self, gen):
- Node.update(self, gen)
- if self.first != [self.token]:
- self.first = [self.token]
- gen.changed()
-
- def output(self, gen, indent):
- gen.write(indent)
- if re.match('[a-zA-Z_][a-zA-Z_0-9]*$', self.token):
- gen.write(self.token, " = ")
- gen.write("self._scan(%s)\n" % repr(self.token))
-
-
-class Eval(Node):
- def __init__(self, expr):
- Node.__init__(self)
- self.expr = expr
-
- def setup(self, gen, rule):
- Node.setup(self, gen, rule)
- if not self.accepts_epsilon:
- self.accepts_epsilon = 1
- gen.changed()
-
- def __str__(self):
- return '{{ %s }}' % self.expr.strip()
-
- def output(self, gen, indent):
- gen.write(indent, self.expr.strip(), '\n')
-
-
-class NonTerminal(Node):
- def __init__(self, name, args):
- Node.__init__(self)
- self.name = name
- self.args = args
-
- def setup(self, gen, rule):
- Node.setup(self, gen, rule)
- try:
- self.target = gen.rules[self.name]
- if self.accepts_epsilon != self.target.accepts_epsilon:
- self.accepts_epsilon = self.target.accepts_epsilon
- gen.changed()
- except KeyError: # Oops, it's nonexistent
- print 'Error: no rule <%s>' % self.name
- self.target = self
-
- def __str__(self):
- return '<%s>' % self.name
-
- def update(self, gen):
- Node.update(self, gen)
- gen.equate(self.first, self.target.first)
- gen.equate(self.follow, self.target.follow)
-
- def output(self, gen, indent):
- gen.write(indent)
- gen.write(self.name, " = ")
- gen.write("self.", self.name, "(", self.args, ")\n")
-
-
-class Sequence(Node):
- def __init__(self, *children):
- Node.__init__(self)
- self.children = children
-
- def setup(self, gen, rule):
- Node.setup(self, gen, rule)
- for c in self.children:
- c.setup(gen, rule)
-
- if not self.accepts_epsilon:
- # If it's not already accepting epsilon, it might now do so.
- for c in self.children:
- # any non-epsilon means all is non-epsilon
- if not c.accepts_epsilon:
- break
- else:
- self.accepts_epsilon = 1
- gen.changed()
-
- def get_children(self):
- return self.children
-
- def __str__(self):
- return '( %s )' % join(map(lambda x: str(x), self.children))
-
- def update(self, gen):
- Node.update(self, gen)
- for g in self.children:
- g.update(gen)
-
- empty = 1
- for g_i in range(len(self.children)):
- g = self.children[g_i]
-
- if empty:
- gen.add_to(self.first, g.first)
- if not g.accepts_epsilon:
- empty = 0
-
- if g_i == len(self.children) - 1:
- next = self.follow
- else:
- next = self.children[1 + g_i].first
- gen.add_to(g.follow, next)
-
- if self.children:
- gen.add_to(self.follow, self.children[-1].follow)
-
- def output(self, gen, indent):
- if self.children:
- for c in self.children:
- c.output(gen, indent)
- else:
- # Placeholder for empty sequences, just in case
- gen.write(indent, 'pass\n')
-
-class Choice(Node):
- def __init__(self, *children):
- Node.__init__(self)
- self.children = children
-
- def setup(self, gen, rule):
- Node.setup(self, gen, rule)
- for c in self.children:
- c.setup(gen, rule)
-
- if not self.accepts_epsilon:
- for c in self.children:
- if c.accepts_epsilon:
- self.accepts_epsilon = 1
- gen.changed()
-
- def get_children(self):
- return self.children
-
- def __str__(self):
- return '( %s )' % join(map(lambda x: str(x), self.children), ' | ')
-
- def update(self, gen):
- Node.update(self, gen)
- for g in self.children:
- g.update(gen)
-
- for g in self.children:
- gen.add_to(self.first, g.first)
- gen.add_to(self.follow, g.follow)
- for g in self.children:
- gen.add_to(g.follow, self.follow)
- if self.accepts_epsilon:
- gen.add_to(self.first, self.follow)
-
- def output(self, gen, indent):
- test = "if"
- gen.write(indent, "_token_ = ", gen.peek_call(self.rule, self.first), "\n")
- tokens_seen = []
- tokens_unseen = self.first[:]
- if gen['context-insensitive-scanner']:
- # Context insensitive scanners can return ANY token,
- # not only the ones in first.
- tokens_unseen = gen.non_ignored_tokens()
- for c in self.children:
- testset = c.first[:]
- removed = []
- for x in testset:
- if x in tokens_seen:
- testset.remove(x)
- removed.append(x)
- if x in tokens_unseen:
- tokens_unseen.remove(x)
- tokens_seen = tokens_seen + testset
- if removed:
- if not testset:
- print 'Error in rule', self.rule + ':', c, 'never matches.'
- else:
- print 'Warning:', self
- print ' * These tokens are being ignored:', join(removed, ', ')
- print ' due to previous choices using them.'
-
- if testset:
- if not tokens_unseen: # context sensitive scanners only!
- if test == 'if':
- # if it's the first AND last test, then
- # we can simply put the code without an if/else
- c.output(gen, indent)
- else:
- gen.write(indent, "else:")
- t = gen.in_test(self.rule, '', [], testset)
- if len(t) < 70 - len(indent):
- gen.write(" #", t)
- gen.write("\n")
- c.output(gen, indent + INDENT)
- else:
- gen.write(indent, test, " ",
- gen.in_test(self.rule, '_token_', tokens_unseen, testset),
- ":\n")
- c.output(gen, indent + INDENT)
- test = "elif"
-
- if gen['context-insensitive-scanner'] and tokens_unseen:
- gen.write(indent, "else:\n")
- gen.write(indent, INDENT, "raise SyntaxError(self._pos, ")
- gen.write("'Could not match ", self.rule, "')\n")
-
-
-class Wrapper(Node):
- def __init__(self, child):
- Node.__init__(self)
- self.child = child
-
- def setup(self, gen, rule):
- Node.setup(self, gen, rule)
- self.child.setup(gen, rule)
-
- def get_children(self):
- return [self.child]
-
- def update(self, gen):
- Node.update(self, gen)
- self.child.update(gen)
- gen.add_to(self.first, self.child.first)
- gen.equate(self.follow, self.child.follow)
-
-
-class Option(Wrapper):
- def setup(self, gen, rule):
- Wrapper.setup(self, gen, rule)
- if not self.accepts_epsilon:
- self.accepts_epsilon = 1
- gen.changed()
-
- def __str__(self):
- return '[ %s ]' % str(self.child)
-
- def output(self, gen, indent):
- if self.child.accepts_epsilon:
- print 'Warning in rule', self.rule + ': contents may be empty.'
- gen.write(indent, "if %s:\n" %
- gen.peek_test(self.rule, self.first, self.child.first))
- self.child.output(gen, indent + INDENT)
-
-
-class Plus(Wrapper):
- def setup(self, gen, rule):
- Wrapper.setup(self, gen, rule)
- if self.accepts_epsilon != self.child.accepts_epsilon:
- self.accepts_epsilon = self.child.accepts_epsilon
- gen.changed()
-
- def __str__(self):
- return '%s+' % str(self.child)
-
- def update(self, gen):
- Wrapper.update(self, gen)
- gen.add_to(self.follow, self.first)
-
- def output(self, gen, indent):
- if self.child.accepts_epsilon:
- print 'Warning in rule', self.rule + ':'
- print ' * The repeated pattern could be empty. The resulting'
- print ' parser may not work properly.'
- gen.write(indent, "while 1:\n")
- self.child.output(gen, indent + INDENT)
- union = self.first[:]
- gen.add_to(union, self.follow)
- gen.write(indent + INDENT, "if %s:\n" %
- gen.not_peek_test(self.rule, union, self.child.first))
- gen.write(indent + INDENT * 2, "break\n")
-
-
-class Star(Plus):
- def setup(self, gen, rule):
- Wrapper.setup(self, gen, rule)
- if not self.accepts_epsilon:
- self.accepts_epsilon = 1
- gen.changed()
-
- def __str__(self):
- return '%s*' % str(self.child)
-
- def output(self, gen, indent):
- if self.child.accepts_epsilon:
- print 'Warning in rule', self.rule + ':'
- print ' * The repeated pattern could be empty. The resulting'
- print ' parser probably will not work properly.'
- gen.write(indent, "while %s:\n" %
- gen.peek_test(self.rule, self.follow, self.child.first))
- self.child.output(gen, indent + INDENT)
-
-######################################################################
-# The remainder of this file is from parsedesc.{g,py}
-
-
-def append(lst, x):
- "Imperative append"
- lst.append(x)
- return lst
-
-
-def add_inline_token(tokens, str):
- tokens.insert(0, (str, eval(str, {}, {})))
- return Terminal(str)
-
-
-def cleanup_choice(lst):
- if len(lst) == 0:
- return Sequence([])
- if len(lst) == 1:
- return lst[0]
- return apply(Choice, tuple(lst))
-
-
-def cleanup_sequence(lst):
- if len(lst) == 1:
- return lst[0]
- return apply(Sequence, tuple(lst))
-
-
-def cleanup_rep(node, rep):
- if rep == 'star':
- return Star(node)
- elif rep == 'plus':
- return Plus(node)
- else:
- return node
-
-
-def resolve_name(tokens, id, args):
- if id in map(lambda x: x[0], tokens):
- # It's a token
- if args:
- print 'Warning: ignoring parameters on TOKEN %s<<%s>>' % (id, args)
- return Terminal(id)
- else:
- # It's a name, so assume it's a nonterminal
- return NonTerminal(id, args)
-
-
-from string import *
-from yappsrt import *
-
-
-class ParserDescriptionScanner(Scanner):
- def __init__(self, str):
- Scanner.__init__(self, [
- ('"rule"', 'rule'),
- ('"ignore"', 'ignore'),
- ('"token"', 'token'),
- ('"option"', 'option'),
- ('":"', ':'),
- ('"parser"', 'parser'),
- ('[ \011\015\012]+', '[ \011\015\012]+'),
- ('#.*?\015?\012', '#.*?\015?\012'),
- ('END', '$'),
- ('ATTR', '<<.+?>>'),
- ('STMT', '{{.+?}}'),
- ('ID', '[a-zA-Z_][a-zA-Z_0-9]*'),
- ('STR', '[rR]?\'([^\\n\'\\\\]|\\\\.)*\'|[rR]?"([^\\n"\\\\]|\\\\.)*"'),
- ('LP', '\\('),
- ('RP', '\\)'),
- ('LB', '\\['),
- ('RB', '\\]'),
- ('OR', '[|]'),
- ('STAR', '[*]'),
- ('PLUS', '[+]'),
- ], ['[ \011\015\012]+', '#.*?\015?\012'], str)
-
-
-class ParserDescription(Parser):
- def Parser(self):
- self._scan('"parser"')
- ID = self._scan('ID')
- self._scan('":"')
- Options = self.Options()
- Tokens = self.Tokens()
- Rules = self.Rules(Tokens)
- END = self._scan('END')
- return Generator(ID, Options, Tokens, Rules)
-
- def Options(self):
- opt = {}
- while self._peek(set(['"option"', '"token"', '"ignore"', 'END', '"rule"'])) == '"option"':
- self._scan('"option"')
- self._scan('":"')
- Str = self.Str()
- opt[Str] = 1
- return opt
-
- def Tokens(self):
- tok = []
- while self._peek(set(['"token"', '"ignore"', 'END', '"rule"'])) in ['"token"', '"ignore"']:
- _token_ = self._peek(set(['"token"', '"ignore"']))
- if _token_ == '"token"':
- self._scan('"token"')
- ID = self._scan('ID')
- self._scan('":"')
- Str = self.Str()
- tok.append((ID, Str))
- else: # == '"ignore"'
- self._scan('"ignore"')
- self._scan('":"')
- Str = self.Str()
- tok.append(('#ignore', Str))
- return tok
-
- def Rules(self, tokens):
- rul = []
- while self._peek(set(['"rule"', 'END'])) == '"rule"':
- self._scan('"rule"')
- ID = self._scan('ID')
- OptParam = self.OptParam()
- self._scan('":"')
- ClauseA = self.ClauseA(tokens)
- rul.append((ID, OptParam, ClauseA))
- return rul
-
- def ClauseA(self, tokens):
- ClauseB = self.ClauseB(tokens)
- v = [ClauseB]
- while self._peek(set(['OR', 'RP', 'RB', '"rule"', 'END'])) == 'OR':
- OR = self._scan('OR')
- ClauseB = self.ClauseB(tokens)
- v.append(ClauseB)
- return cleanup_choice(v)
-
- def ClauseB(self, tokens):
- v = []
- while self._peek(set(['STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'END'])) in ['STR', 'ID', 'LP', 'LB', 'STMT']:
- ClauseC = self.ClauseC(tokens)
- v.append(ClauseC)
- return cleanup_sequence(v)
-
- def ClauseC(self, tokens):
- ClauseD = self.ClauseD(tokens)
- _token_ = self._peek(set(['PLUS', 'STAR', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'END']))
- if _token_ == 'PLUS':
- PLUS = self._scan('PLUS')
- return Plus(ClauseD)
- elif _token_ == 'STAR':
- STAR = self._scan('STAR')
- return Star(ClauseD)
- else:
- return ClauseD
-
- def ClauseD(self, tokens):
- _token_ = self._peek(set(['STR', 'ID', 'LP', 'LB', 'STMT']))
- if _token_ == 'STR':
- STR = self._scan('STR')
- t = (STR, eval(STR, {}, {}))
- if t not in tokens:
- tokens.insert(0, t)
- return Terminal(STR)
- elif _token_ == 'ID':
- ID = self._scan('ID')
- OptParam = self.OptParam()
- return resolve_name(tokens, ID, OptParam)
- elif _token_ == 'LP':
- LP = self._scan('LP')
- ClauseA = self.ClauseA(tokens)
- RP = self._scan('RP')
- return ClauseA
- elif _token_ == 'LB':
- LB = self._scan('LB')
- ClauseA = self.ClauseA(tokens)
- RB = self._scan('RB')
- return Option(ClauseA)
- else: # == 'STMT'
- STMT = self._scan('STMT')
- return Eval(STMT[2:-2])
-
- def OptParam(self):
- if self._peek(set(['ATTR', '":"', 'PLUS', 'STAR', 'STR', 'ID', 'LP', 'LB', 'STMT', 'OR', 'RP', 'RB', '"rule"', 'END'])) == 'ATTR':
- ATTR = self._scan('ATTR')
- return ATTR[2:-2]
- return ''
-
- def Str(self):
- STR = self._scan('STR')
- return eval(STR, {}, {})
-
-
-# This replaces the default main routine
-
-
-yapps_options = [
- ('context-insensitive-scanner', 'context-insensitive-scanner',
- 'Scan all tokens (see docs)')
- ]
-
-
-def generate(inputfilename, outputfilename='', dump=0, **flags):
- """Generate a grammar, given an input filename (X.g)
- and an output filename (defaulting to X.py)."""
-
- if not outputfilename:
- if inputfilename[-2:] == '.g':
- outputfilename = inputfilename[:-2] + '.py'
- else:
- raise Exception("Missing output filename")
-
- print 'Input Grammar:', inputfilename
- print 'Output File:', outputfilename
-
- DIVIDER = '\n%%\n' # This pattern separates the pre/post parsers
- preparser, postparser = None, None # Code before and after the parser desc
-
- # Read the entire file
- s = open(inputfilename, 'r').read()
-
- # See if there's a separation between the pre-parser and parser
- f = find(s, DIVIDER)
- if f >= 0:
- preparser, s = s[:f] + '\n\n', s[f + len(DIVIDER):]
-
- # See if there's a separation between the parser and post-parser
- f = find(s, DIVIDER)
- if f >= 0:
- s, postparser = s[:f], '\n\n' + s[f + len(DIVIDER):]
-
- # Create the parser and scanner
- p = ParserDescription(ParserDescriptionScanner(s))
- if not p:
- return
-
- # Now parse the file
- t = wrap_error_reporter(p, 'Parser')
- if not t:
- return # Error
- if preparser is not None:
- t.preparser = preparser
- if postparser is not None:
- t.postparser = postparser
-
- # Check the options
- for f in t.options.keys():
- for opt, _, _ in yapps_options:
- if f == opt:
- break
- else:
- print 'Warning: unrecognized option', f
- # Add command line options to the set
- for f in flags.keys():
- t.options[f] = flags[f]
-
- # Generate the output
- if dump:
- t.dump_information()
- else:
- t.output = open(outputfilename, 'w')
- t.generate_output()
-
-if __name__ == '__main__':
- import getopt
- optlist, args = getopt.getopt(sys.argv[1:], 'f:', ['dump'])
- if not args or len(args) > 2:
- print 'Usage:'
- print ' python', sys.argv[0], '[flags] input.g [output.py]'
- print 'Flags:'
- print (' --dump' + ' ' * 40)[:35] + 'Dump out grammar information'
- for flag, _, doc in yapps_options:
- print (' -f' + flag + ' ' * 40)[:35] + doc
- else:
- # Read in the options and create a list of flags
- flags = {}
- for opt in optlist:
- for flag, name, _ in yapps_options:
- if opt == ('-f', flag):
- flags[name] = 1
- break
- else:
- if opt == ('--dump', ''):
- flags['dump'] = 1
- else:
- print 'Warning: unrecognized option', opt[0], opt[1]
-
- apply(generate, tuple(args), flags)
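
Generator.calculate() above is a fixed-point iteration: it keeps re-running setup() and update() over every rule until a whole pass leaves change_count at zero, which is how the FIRST/FOLLOW sets converge. The same idiom in isolation (a generic sketch, not the generator itself):

    def fixed_point(items, step):
        """Apply step(item) until a full pass reports no change; step
        returns True if it grew anything. This terminates because the
        sets involved only ever grow toward a finite bound."""
        changed = True
        while changed:
            changed = False
            for item in items:
                if step(item):
                    changed = True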
diff --git a/scss/src/grammar/yappsrt.py b/scss/src/grammar/yappsrt.py
deleted file mode 100644
index e7c18cd..0000000
--- a/scss/src/grammar/yappsrt.py
+++ /dev/null
@@ -1,275 +0,0 @@
-# Yapps 3.0 Runtime (by Kronuz)
-#
-# This module is needed to run generated parsers.
-
-import re
-
-try:
- from _scss import Scanner, NoMoreTokens
-except ImportError:
- Scanner = None
-
-################################################################################
-# Parser
-
-if not Scanner:
- class NoMoreTokens(Exception):
- """
- Another exception object, for when we run out of tokens
- """
- pass
-
- class Scanner(object):
- def __init__(self, patterns, ignore, input=None):
- """
- Patterns is [(terminal,regex)...]
- Ignore is [terminal,...];
- Input is a string
- """
- self.reset(input)
- self.ignore = ignore
- # The stored patterns are a pair (compiled regex,source
- # regex). If the patterns variable passed in to the
- # constructor is None, we assume that the class already has a
- # proper .patterns list constructed
- if patterns is not None:
- self.patterns = []
- for k, r in patterns:
- self.patterns.append((k, re.compile(r)))
-
- def reset(self, input):
- self.tokens = []
- self.restrictions = []
- self.input = input
- self.pos = 0
-
- def __repr__(self):
- """
- Print the last 10 tokens that have been scanned in
- """
- output = ''
- for t in self.tokens[-10:]:
- output = "%s\n (@%s) %s = %s" % (output, t[0], t[2], repr(t[3]))
- return output
-
- def _scan(self, restrict):
- """
- Should scan another token and add it to the list, self.tokens,
- and add the restriction to self.restrictions
- """
- # Keep looking for a token, ignoring any in self.ignore
- token = None
- while True:
- best_pat = None
- # Search the patterns for a match, with earlier
- # tokens in the list having preference
- best_pat_len = 0
- for p, regexp in self.patterns:
- # First check to see if we're restricting to this token
- if restrict and p not in restrict and p not in self.ignore:
- continue
- m = regexp.match(self.input, self.pos)
- if m:
- # We got a match
- best_pat = p
- best_pat_len = len(m.group(0))
- break
-
- # If we didn't find anything, raise an error
- if best_pat is None:
- msg = "Bad Token"
- if restrict:
- msg = "Trying to find one of " + ", ".join(restrict)
- raise SyntaxError("SyntaxError[@ char %s: %s]" % (repr(self.pos), msg))
-
- # If we found something that isn't to be ignored, return it
- if best_pat in self.ignore:
- # This token should be ignored...
- self.pos += best_pat_len
- else:
- end_pos = self.pos + best_pat_len
- # Create a token with this data
- token = (
- self.pos,
- end_pos,
- best_pat,
- self.input[self.pos:end_pos]
- )
- break
- if token is not None:
- self.pos = token[1]
- # Only add this token if it's not in the list
- # (to prevent looping)
- if not self.tokens or token != self.tokens[-1]:
- self.tokens.append(token)
- self.restrictions.append(restrict)
- return 1
- return 0
-
- def token(self, i, restrict=None):
- """
- Get the i'th token, and if i is one past the end, then scan
- for another token; restrict is a list of tokens that
- are allowed, or 0 for any token.
- """
- tokens_len = len(self.tokens)
- if i == tokens_len: # We are at the end, get the next...
- tokens_len += self._scan(restrict)
- if i < tokens_len:
- if restrict and self.restrictions[i] and restrict > self.restrictions[i]:
- raise NotImplementedError("Unimplemented: restriction set changed")
- return self.tokens[i]
- raise NoMoreTokens
-
- def rewind(self, i):
- tokens_len = len(self.tokens)
- if i <= tokens_len:
- token = self.tokens[i]
- self.tokens = self.tokens[:i]
- self.restrictions = self.restrictions[:i]
- self.pos = token[0]
-
-
-class CachedScanner(Scanner):
- """
- Same as Scanner, but keeps cached tokens for any given input
- """
- _cache_ = {}
- _goals_ = ['END']
-
- @classmethod
- def cleanup(cls):
- cls._cache_ = {}
-
- def __init__(self, patterns, ignore, input=None):
- try:
- self._tokens = self._cache_[input]
- except KeyError:
- self._tokens = None
- self.__tokens = {}
- self.__input = input
- super(CachedScanner, self).__init__(patterns, ignore, input)
-
- def reset(self, input):
- try:
- self._tokens = self._cache_[input]
- except KeyError:
- self._tokens = None
- self.__tokens = {}
- self.__input = input
- super(CachedScanner, self).reset(input)
-
- def __repr__(self):
- if self._tokens is None:
- return super(CachedScanner, self).__repr__()
- output = ''
- for t in self._tokens[-10:]:
- output = "%s\n (@%s) %s = %s" % (output, t[0], t[2], repr(t[3]))
- return output
-
- def token(self, i, restrict=None):
- if self._tokens is None:
- token = super(CachedScanner, self).token(i, restrict)
- self.__tokens[i] = token
- if token[2] in self._goals_: # goal tokens
- self._cache_[self.__input] = self._tokens = self.__tokens
- return token
- else:
- token = self._tokens.get(i)
- if token is None:
- raise NoMoreTokens
- return token
-
- def rewind(self, i):
- if self._tokens is None:
- super(CachedScanner, self).rewind(i)
-
-
-class Parser(object):
- def __init__(self, scanner):
- self._scanner = scanner
- self._pos = 0
-
- def reset(self, input):
- self._scanner.reset(input)
- self._pos = 0
-
- def _peek(self, types):
- """
- Returns the token type for lookahead; if there are any args
- then the list of args is the set of token types to allow
- """
- tok = self._scanner.token(self._pos, types)
- return tok[2]
-
- def _scan(self, type):
- """
- Returns the matched text, and moves to the next token
- """
- tok = self._scanner.token(self._pos, set([type]))
- if tok[2] != type:
- raise SyntaxError("SyntaxError[@ char %s: %s]" % (repr(tok[0]), "Trying to find " + type))
- self._pos += 1
- return tok[3]
-
- def _rewind(self, n=1):
- self._pos -= min(n, self._pos)
- self._scanner.rewind(self._pos)
-
-
-################################################################################
-
-
-def print_error(input, err, scanner):
- """This is a really dumb long function to print error messages nicely."""
- p = err.pos
- # Figure out the line number
- line = input[:p].count('\n')
- print err.msg + " on line " + repr(line + 1) + ":"
- # Now try printing part of the line
- text = input[max(p - 80, 0):
- p + 80]
- p = p - max(p - 80, 0)
-
- # Strip to the left
- i = text[:p].rfind('\n')
- j = text[:p].rfind('\r')
- if i < 0 or (0 <= j < i):
- i = j
- if 0 <= i < p:
- p = p - i - 1
- text = text[i + 1:]
-
- # Strip to the right
- i = text.find('\n', p)
- j = text.find('\r', p)
- if i < 0 or (0 <= j < i):
- i = j
- if i >= 0:
- text = text[:i]
-
- # Now shorten the text
- while len(text) > 70 and p > 60:
- # Cut off 10 chars
- text = "..." + text[10:]
- p = p - 7
-
- # Now print the string, along with an indicator
- print '> ', text
- print '> ', ' ' * p + '^'
- print 'List of nearby tokens:', scanner
-
-
-def wrap_error_reporter(parser, rule, *args):
- try:
- return getattr(parser, rule)(*args)
- except SyntaxError, s:
- input = parser._scanner.input
- try:
- print_error(input, s, parser._scanner)
- raise
- except ImportError:
- print "Syntax Error %s on line %d" % (s.msg, input[:s.pos].count('\n') + 1)
- except NoMoreTokens:
- print "Could not complete parsing; stopped around here:"
- print parser._scanner
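
The CachedScanner above memoizes whole token streams per input string, but only publishes a stream to the class-level cache once a goal token ('END') has been scanned, so aborted parses never poison later runs. That commit-on-success shape, reduced to a sketch with invented names:

    def cached_tokens(cache, key, produce_tokens, goal='END'):
        """Return memoized tokens for key; commit to the cache only when
        the stream reached the goal token. token[2] is the type field,
        matching the (start, end, type, value) tuples above."""
        if key in cache:
            return cache[key]
        tokens = produce_tokens()
        if tokens and tokens[-1][2] == goal:
            cache[key] = tokens
        return tokens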
diff --git a/scss/src/grammar/LICENSE b/scss/src/yapps/LICENSE
index 64f38b8..64f38b8 100644
--- a/scss/src/grammar/LICENSE
+++ b/scss/src/yapps/LICENSE
diff --git a/scss/src/grammar/README b/scss/src/yapps/README
index 3bd8f99..3bd8f99 100644
--- a/scss/src/grammar/README
+++ b/scss/src/yapps/README
diff --git a/scss/src/yapps/__init__.py b/scss/src/yapps/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/scss/src/yapps/__init__.py
diff --git a/scss/src/yapps/cli_tool.py b/scss/src/yapps/cli_tool.py
new file mode 100755
index 0000000..bfec4d3
--- /dev/null
+++ b/scss/src/yapps/cli_tool.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+
+#
+# Yapps 2 - yet another python parser system
+# Copyright 1999-2003 by Amit J. Patel <amitp@cs.stanford.edu>
+#
+# This version of Yapps 2 can be distributed under the
+# terms of the MIT open source license, either found in the LICENSE file
+# included with the Yapps distribution
+# <http://theory.stanford.edu/~amitp/yapps/> or at
+# <http://www.opensource.org/licenses/mit-license.php>
+#
+
+import os
+import sys
+
+try:
+ from yapps import runtime
+ from yapps import parsetree
+ from yapps import grammar
+except ImportError:
+ try:
+ import runtime
+ import parsetree
+ import grammar
+ except ImportError:
+ # For running binary from a checkout-path directly
+ if os.path.isfile('yapps/__init__.py'):
+ sys.path.append('.')
+ from yapps import runtime
+ from yapps import parsetree
+ from yapps import grammar
+ else:
+ raise
+
+
+def generate(inputfilename, outputfilename=None, dump=0, **flags):
+ """Generate a grammar, given an input filename (X.g)
+ and an output filename (defaulting to X.py)."""
+
+ if not outputfilename:
+ if inputfilename.endswith('.g'):
+ outputfilename = inputfilename[:-2] + '.py'
+ else:
+ raise Exception('Must specify output filename if input filename is not *.g')
+
+ DIVIDER = '\n%%\n' # This pattern separates the pre/post parsers
+ preparser, postparser = None, None # Code before and after the parser desc
+
+ # Read the entire file
+ s = open(inputfilename, 'r').read()
+
+ # See if there's a separation between the pre-parser and parser
+ f = s.find(DIVIDER)
+ if f >= 0:
+ preparser, s = s[:f] + '\n\n', s[f + len(DIVIDER):]
+
+ # See if there's a separation between the parser and post-parser
+ f = s.find(DIVIDER)
+ if f >= 0:
+ s, postparser = s[:f] + '\n\n', s[f + len(DIVIDER):]
+
+ # Create the parser and scanner and parse the text
+ scanner = grammar.ParserDescriptionScanner(s, filename=inputfilename)
+ if preparser:
+ scanner.del_line += preparser.count('\n')
+
+ parser = grammar.ParserDescription(scanner)
+ t = runtime.wrap_error_reporter(parser, 'Parser')
+ if t is None:
+ return 1 # Failure
+ if preparser is not None:
+ t.preparser = preparser
+ if postparser is not None:
+ t.postparser = postparser
+
+ # Add command line options to the set
+ t.options.update(flags)
+
+ # Generate the output
+ if dump:
+ t.dump_information()
+ else:
+ t.output = open(outputfilename, 'w')
+ t.generate_output()
+ return 0
+
+
+def main(argv=None):
+ import doctest
+ doctest.testmod(sys.modules['__main__'])
+ doctest.testmod(parsetree)
+
+ import argparse
+ parser = argparse.ArgumentParser(
+ description='Generate python parser code from grammar description file.')
+ parser.add_argument('grammar_path', help='Path to grammar description file (input).')
+ parser.add_argument('parser_path', nargs='?',
+ help='Path to output file to be generated.'
+ ' Input path, but with .py will be used, if omitted.')
+ parser.add_argument('-i', '--context-insensitive-scanner',
+ action='store_true', help='Scan all tokens (see docs).')
+ parser.add_argument('-t', '--indent-with-tabs', action='store_true',
+ help='Use tabs instead of four spaces for indentation in generated code.')
+ parser.add_argument('--dump', action='store_true', help='Dump out grammar information.')
+ optz = parser.parse_args(argv if argv is not None else sys.argv[1:])
+
+ parser_flags = dict()
+ for k in 'dump', 'context_insensitive_scanner':
+ if getattr(optz, k, False):
+ parser_flags[k] = True
+ if optz.indent_with_tabs:
+ parsetree.INDENT = '\t' # not the cleanest way
+
+ sys.exit(generate(optz.grammar_path, optz.parser_path, **parser_flags))
+
+
+if __name__ == '__main__':
+ main()
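
With the new package layout, regeneration no longer goes through the deleted yapps2.py; the module above is the entry point. A usage sketch (the paths are illustrative, and scss/src must be importable for the package-style import to work):

    # From a shell:
    #   python scss/src/yapps/cli_tool.py scss.g scss.py
    # Or programmatically:
    from yapps import cli_tool
    cli_tool.generate('scss.g', 'scss.py')   # writes the generated parser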
diff --git a/scss/src/yapps/grammar.py b/scss/src/yapps/grammar.py
new file mode 100644
index 0000000..fa12e4c
--- /dev/null
+++ b/scss/src/yapps/grammar.py
@@ -0,0 +1,262 @@
+# grammar.py, part of Yapps 2 - yet another python parser system
+# Copyright 1999-2003 by Amit J. Patel <amitp@cs.stanford.edu>
+# Enhancements copyright 2003-2004 by Matthias Urlichs <smurf@debian.org>
+#
+# This version of the Yapps 2 grammar can be distributed under the
+# terms of the MIT open source license, either found in the LICENSE
+# file included with the Yapps distribution
+# <http://theory.stanford.edu/~amitp/yapps/> or at
+# <http://www.opensource.org/licenses/mit-license.php>
+#
+
+"""Parser for Yapps grammars.
+
+This file defines the grammar of Yapps grammars. Naturally, it is
+implemented in Yapps. The grammar.py module needed by Yapps is built
+by running Yapps on yapps_grammar.g. (Holy circularity, Batman!)
+
+"""
+
+try:
+ from yapps import parsetree
+except ImportError:
+ import parsetree
+
+
+######################################################################
+def cleanup_choice(rule, lst):
+ if len(lst) == 0:
+ return parsetree.Sequence(rule, [])
+ if len(lst) == 1:
+ return lst[0]
+ return parsetree.Choice(rule, *tuple(lst))
+
+
+def cleanup_sequence(rule, lst):
+ if len(lst) == 1:
+ return lst[0]
+ return parsetree.Sequence(rule, *tuple(lst))
+
+
+def resolve_name(rule, tokens, id, args):
+ if id in [x[0] for x in tokens]:
+ # It's a token
+ if args:
+ print 'Warning: ignoring parameters on TOKEN %s<<%s>>' % (id, args)
+ return parsetree.Terminal(rule, id)
+ else:
+ # It's a name, so assume it's a nonterminal
+ return parsetree.NonTerminal(rule, id, args)
+
+
+# Begin -- grammar generated by Yapps
+import re
+try:
+ from yapps.runtime import Scanner, Parser, Context, wrap_error_reporter
+except ImportError:
+ from runtime import Scanner, Parser, Context, wrap_error_reporter
+
+
+class ParserDescriptionScanner(Scanner):
+ patterns = None
+ _patterns = [
+ ('"rule"', 'rule'),
+ ('"ignore"', 'ignore'),
+ ('"token"', 'token'),
+ ('"option"', 'option'),
+ ('":"', ':'),
+ ('"parser"', 'parser'),
+ ('[ \t\r\n]+', '[ \t\r\n]+'),
+ ('#.*?\r?\n', '#.*?\r?\n'),
+ ('EOF', '$'),
+ ('ATTR', '<<.+?>>'),
+ ('STMT', '{{.+?}}'),
+ ('ID', '[a-zA-Z_][a-zA-Z_0-9]*'),
+ ('STR', '[rR]?\'([^\\n\'\\\\]|\\\\.)*\'|[rR]?"([^\\n"\\\\]|\\\\.)*"'),
+ ('LP', '\\('),
+ ('RP', '\\)'),
+ ('LB', '\\['),
+ ('RB', '\\]'),
+ ('OR', '[|]'),
+ ('STAR', '[*]'),
+ ('PLUS', '[+]'),
+ ('QUEST', '[?]'),
+ ('COLON', ':'),
+ ]
+
+ def __init__(self, str, *args, **kw):
+ if hasattr(self, 'setup_patterns'):
+ self.setup_patterns(self._patterns)
+ elif self.patterns is None:
+ self.__class__.patterns = []
+ for t, p in self._patterns:
+ self.patterns.append((t, re.compile(p)))
+ super(ParserDescriptionScanner, self).__init__(None, {'[ \t\r\n]+': None, '#.*?\r?\n': None, }, str, *args, **kw)
+
+
+class ParserDescription(Parser):
+ Context = Context
+
+ def Parser(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'Parser', [])
+ self._scan('"parser"', context=_context)
+ ID = self._scan('ID', context=_context)
+ self._scan('":"', context=_context)
+ Options = self.Options(_context)
+ Tokens = self.Tokens(_context)
+ Rules = self.Rules(Tokens, _context)
+ EOF = self._scan('EOF', context=_context)
+ return parsetree.Generator(ID, Options, Tokens, Rules)
+
+ def Options(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'Options', [])
+ opt = {}
+ while self._peek(self._option_ignore_rule_EOF_token, context=_context) == self._option:
+ self._scan('"option"', context=_context)
+ self._scan('":"', context=_context)
+ Str = self.Str(_context)
+ opt[Str] = 1
+ return opt
+
+ def Tokens(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'Tokens', [])
+ tok = []
+ while self._peek(self._ignore_rule_EOF_token, context=_context) in self._ignore_token:
+ _token = self._peek(self._ignore_token, context=_context)
+ if _token == self._token:
+ self._scan('"token"', context=_context)
+ ID = self._scan('ID', context=_context)
+ self._scan('":"', context=_context)
+ Str = self.Str(_context)
+ tid = (ID, Str)
+ if self._peek(self._ignore_rule_token_STMT_EOF, context=_context) == self._STMT:
+ STMT = self._scan('STMT', context=_context)
+ tid += (STMT[2:-2],)
+ tok.append(tid)
+ else: # == self._ignore
+ self._scan('"ignore"', context=_context)
+ self._scan('":"', context=_context)
+ Str = self.Str(_context)
+ ign = ('#ignore', Str)
+ if self._peek(self._ignore_rule_token_STMT_EOF, context=_context) == self._STMT:
+ STMT = self._scan('STMT', context=_context)
+ ign += (STMT[2:-2],)
+ tok.append(ign)
+ return tok
+
+ def Rules(self, tokens, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'Rules', [tokens])
+ rul = []
+ while self._peek(self._rule_EOF, context=_context) == self._rule:
+ self._scan('"rule"', context=_context)
+ ID = self._scan('ID', context=_context)
+ OptParam = self.OptParam(_context)
+ self._scan('":"', context=_context)
+ ClauseA = self.ClauseA(ID, tokens, _context)
+ rul.append((ID, OptParam, ClauseA))
+ return rul
+
+ def ClauseA(self, rule, tokens, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'ClauseA', [rule, tokens])
+ ClauseB = self.ClauseB(rule, tokens, _context)
+ v = [ClauseB]
+ while self._peek(self._rule_RP_OR_RB_EOF, context=_context) == self._OR:
+ OR = self._scan('OR', context=_context)
+ ClauseB = self.ClauseB(rule, tokens, _context)
+ v.append(ClauseB)
+ return cleanup_choice(rule, v)
+
+ def ClauseB(self, rule, tokens, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'ClauseB', [rule, tokens])
+ v = []
+ while self._peek(self._LB_LP_rule_RB_STMT_ID_STR_RP__, context=_context) in self._STMT_LB_ID_STR_LP:
+ ClauseC = self.ClauseC(rule, tokens, _context)
+ v.append(ClauseC)
+ return cleanup_sequence(rule, v)
+
+ def ClauseC(self, rule, tokens, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'ClauseC', [rule, tokens])
+ ClauseD = self.ClauseD(rule, tokens, _context)
+ _token = self._peek(self._RB_STAR_LP_rule_RP_STMT_OR_QU_, context=_context)
+ if _token == self._PLUS:
+ PLUS = self._scan('PLUS', context=_context)
+ return parsetree.Plus(rule, ClauseD)
+ elif _token == self._STAR:
+ STAR = self._scan('STAR', context=_context)
+ return parsetree.Star(rule, ClauseD)
+ elif _token == self._QUEST:
+ QUEST = self._scan('QUEST', context=_context)
+ return parsetree.Option(rule, ClauseD)
+ else: # in self._LB_LP_rule_RB_STMT_ID_STR_RP__
+ return ClauseD
+
+ def ClauseD(self, rule, tokens, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'ClauseD', [rule, tokens])
+ _token = self._peek(self._STMT_LB_ID_STR_LP, context=_context)
+ if _token == self._STR:
+ STR = self._scan('STR', context=_context)
+ t = (STR, eval(STR, {}, {}))
+ if t not in tokens:
+ tokens.insert(0, t)
+ return parsetree.Terminal(rule, STR)
+ elif _token == self._ID:
+ ID = self._scan('ID', context=_context)
+ OptParam = self.OptParam(_context)
+ return resolve_name(rule, tokens, ID, OptParam)
+ elif _token == self._LP:
+ LP = self._scan('LP', context=_context)
+ ClauseA = self.ClauseA(rule, tokens, _context)
+ RP = self._scan('RP', context=_context)
+ return ClauseA
+ elif _token == self._LB:
+ LB = self._scan('LB', context=_context)
+ ClauseA = self.ClauseA(rule, tokens, _context)
+ RB = self._scan('RB', context=_context)
+ return parsetree.Option(rule, ClauseA)
+ else: # == self._STMT
+ STMT = self._scan('STMT', context=_context)
+ return parsetree.Eval(rule, STMT[2:-2])
+
+ def OptParam(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'OptParam', [])
+ if self._peek(self._RB_STAR_ATTR_LP_rule_RP_STMT__, context=_context) == self._ATTR:
+ ATTR = self._scan('ATTR', context=_context)
+ return ATTR[2:-2]
+ return ''
+
+ def Str(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'Str', [])
+ STR = self._scan('STR', context=_context)
+ return eval(STR, {}, {})
+
+ _RB_STAR_ATTR_LP_rule_RP_STMT__ = set(['RB', 'STAR', 'ATTR', 'LP', '"rule"', 'RP', 'STMT', 'OR', 'QUEST', 'PLUS', '":"', 'STR', 'LB', 'ID', 'EOF'])
+ _STR = 'STR'
+ _RB_STAR_LP_rule_RP_STMT_OR_QU_ = set(['RB', 'STAR', 'LP', '"rule"', 'RP', 'STMT', 'OR', 'QUEST', 'PLUS', 'STR', 'LB', 'ID', 'EOF'])
+ _LP = 'LP'
+ _STAR = 'STAR'
+ _ignore_rule_EOF_token = set(['"ignore"', '"rule"', 'EOF', '"token"'])
+ _ATTR = 'ATTR'
+ _rule = '"rule"'
+ _LB = 'LB'
+ _option = '"option"'
+ _STMT = 'STMT'
+ _token = '"token"'
+ _rule_RP_OR_RB_EOF = set(['"rule"', 'RP', 'OR', 'RB', 'EOF'])
+ _option_ignore_rule_EOF_token = set(['"option"', '"ignore"', '"rule"', 'EOF', '"token"'])
+ _rule_EOF = set(['"rule"', 'EOF'])
+ _ignore = '"ignore"'
+ _QUEST = 'QUEST'
+ _OR = 'OR'
+ _PLUS = 'PLUS'
+ _STMT_LB_ID_STR_LP = set(['STMT', 'LB', 'ID', 'STR', 'LP'])
+ _ignore_rule_token_STMT_EOF = set(['"ignore"', '"rule"', '"token"', 'STMT', 'EOF'])
+ _LB_LP_rule_RB_STMT_ID_STR_RP__ = set(['LB', 'LP', '"rule"', 'RB', 'STMT', 'ID', 'STR', 'RP', 'OR', 'EOF'])
+ _ID = 'ID'
+ _ignore_token = set(['"ignore"', '"token"'])
+
+
+def parse(rule, text):
+ P = ParserDescription(ParserDescriptionScanner(text))
+ return wrap_error_reporter(P, rule)
+
+# End -- grammar generated by Yapps
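
The module-level parse() above wires a ParserDescriptionScanner and ParserDescription together and returns a parsetree.Generator, or None when error reporting kicks in. A usage sketch with a toy grammar (the grammar text is illustrative only):

    from yapps import grammar

    TOY = '''parser Toy:
        token NUM: "[0-9]+"
        rule goal: NUM {{ return NUM }}
    '''
    gen = grammar.parse('Parser', TOY)   # parsetree.Generator, or None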
diff --git a/scss/src/yapps/parsetree.py b/scss/src/yapps/parsetree.py
new file mode 100644
index 0000000..24f12f2
--- /dev/null
+++ b/scss/src/yapps/parsetree.py
@@ -0,0 +1,774 @@
+# parsetree.py, part of Yapps 2 - yet another python parser system
+# Copyright 1999-2003 by Amit J. Patel <amitp@cs.stanford.edu>
+#
+# This version of the Yapps 2 Runtime can be distributed under the
+# terms of the MIT open source license, either found in the LICENSE file
+# included with the Yapps distribution
+# <http://theory.stanford.edu/~amitp/yapps/> or at
+# <http://www.opensource.org/licenses/mit-license.php>
+#
+
+"""Classes used to represent parse trees and generate output.
+
+This module defines the Generator class, which drives the generation
+of Python output from a grammar parse tree. It also defines nodes
+used to represent the parse tree; they are derived from class Node.
+
+The main logic of Yapps is in this module.
+"""
+
+import re
+import sys
+
+######################################################################
+INDENT = ' ' * 4
+
+
+class Generator:
+
+ # TODO: many of the methods here should be class methods, not instance methods
+
+ def __init__(self, name, options, tokens, rules):
+ self.change_count = 0
+ self.name = name
+ self.options = options
+ self.preparser = ''
+ self.postparser = None
+
+ self.tokens = {} # Map from tokens to regexps
+ self.sets = {} # Map for restriction sets
+ self.ignore = {} # List of token names to ignore in parsing, map to statements
+ self.terminals = [] # List of token names (to maintain ordering)
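+        # Each entry of `tokens` is (name, regex) or (name, regex, stmt),
+        # where stmt is the optional {{ ... }} action attached to a token
+        # or ignore declaration (see the Tokens rule in yapps_grammar.g).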
+ for t in tokens:
+ if len(t) == 3:
+ n, t, s = t
+ else:
+ n, t = t
+ s = None
+
+ if n == '#ignore':
+ n = t
+ self.ignore[n] = s
+            if n in self.tokens and self.tokens[n] != t:
+ print >>sys.stderr, 'Warning: token %s defined more than once.' % n
+ self.tokens[n] = t
+ self.terminals.append(n)
+
+ self.rules = {} # Map from rule names to parser nodes
+ self.params = {} # Map from rule names to parameters
+ self.goals = [] # List of rule names (to maintain ordering)
+ for n, p, r in rules:
+ self.params[n] = p
+ self.rules[n] = r
+ self.goals.append(n)
+
+ self.output = sys.stdout
+
+ def has_option(self, name):
+ return self.options.get(name, False)
+
+ def non_ignored_tokens(self):
+ return [x for x in self.terminals if x not in self.ignore]
+
+ def changed(self):
+ """Increments the change count.
+
+ >>> t = Generator('', [], [], [])
+ >>> old_count = t.change_count
+ >>> t.changed()
+ >>> assert t.change_count == old_count + 1
+ """
+        self.change_count += 1
+
+ def set_subtract(self, a, b):
+ """Returns the elements of a that are not in b.
+
+ >>> t = Generator('', [], [], [])
+ >>> t.set_subtract([], [])
+ []
+ >>> t.set_subtract([1, 2], [1, 2])
+ []
+ >>> t.set_subtract([1, 2, 3], [2])
+ [1, 3]
+ >>> t.set_subtract([1], [2, 3, 4])
+ [1]
+ """
+ result = []
+ for x in a:
+ if x not in b:
+ result.append(x)
+ return result
+
+ def subset(self, a, b):
+ """True iff all elements of sequence a are inside sequence b
+
+ >>> t = Generator('', [], [], [])
+ >>> t.subset([], [1, 2, 3])
+ 1
+ >>> t.subset([1, 2, 3], [])
+ 0
+ >>> t.subset([1], [1, 2, 3])
+ 1
+ >>> t.subset([3, 2, 1], [1, 2, 3])
+ 1
+ >>> t.subset([1, 1, 1], [1, 2, 3])
+ 1
+ >>> t.subset([1, 2, 3], [1, 1, 1])
+ 0
+ """
+ for x in a:
+ if x not in b:
+ return 0
+ return 1
+
+ def equal_set(self, a, b):
+ """True iff subset(a, b) and subset(b, a)
+
+ >>> t = Generator('', [], [], [])
+ >>> a_set = [1, 2, 3]
+ >>> t.equal_set(a_set, a_set)
+ 1
+ >>> t.equal_set(a_set, a_set[:])
+ 1
+ >>> t.equal_set([], a_set)
+ 0
+ >>> t.equal_set([1, 2, 3], [3, 2, 1])
+ 1
+ """
+ if len(a) != len(b):
+ return 0
+ if a == b:
+ return 1
+ return self.subset(a, b) and self.subset(b, a)
+
+ def add_to(self, parent, additions):
+ "Modify _parent_ to include all elements in _additions_"
+ for x in additions:
+ if x not in parent:
+ parent.append(x)
+ self.changed()
+
+ def equate(self, a, b):
+ """Extend (a) and (b) so that they contain each others' elements.
+
+ >>> t = Generator('', [], [], [])
+ >>> a = [1, 2]
+ >>> b = [2, 3]
+ >>> t.equate(a, b)
+ >>> a
+ [1, 2, 3]
+ >>> b
+ [2, 3, 1]
+ """
+ self.add_to(a, b)
+ self.add_to(b, a)
+
+ def write(self, *args):
+ for a in args:
+ self.output.write(a)
+
+ def in_test(self, expr, full, st, as_set=False):
+ """Generate a test of (expr) being in (st), where (st) is a subset of (full)
+
+ expr is a string (Python expression)
+ st is a list of values (which will be converted with repr)
+ full is the list of all values expr could possibly evaluate to
+
+ >>> t = Generator('', [], [], [])
+ >>> t.in_test('x', [1,2,3,4], [])
+ '0'
+ >>> t.in_test('x', [1,2,3,4], [1,2,3,4])
+ '1'
+ >>> t.in_test('x', [1,2,3,4], [1])
+ 'x == 1'
+ >>> t.in_test('a+b', [1,2,3,4], [1,2])
+ 'a+b in [1, 2]'
+ >>> t.in_test('x', [1,2,3,4,5], [1,2,3])
+ 'x not in [4, 5]'
+ >>> t.in_test('x', [1,2,3,4,5], [1,2,3,4])
+ 'x != 5'
+ """
+
+ if not st:
+ return '0'
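+        # With as_set=True the restriction set is hoisted into a class
+        # attribute whose name is derived from its members, e.g.
+        #     _END_COMMA_RPAR = set(['END', 'COMMA', 'RPAR'])
+        # in the generated scss.py below; names longer than 30 chars are
+        # truncated and '_'-suffixed until unique.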
+ if as_set:
+ n = None
+ for k, v in self.sets.items():
+ if set(v) == set(st):
+ n = k
+ if n is None:
+ n = '_' + re.sub(r'[^a-zA-Z_0-9]', '', '_'.join(set(st)))
+ if len(n) > 30:
+ n = n[:30] + '_'
+ while n in self.sets:
+ n += '_'
+ self.sets[n] = st
+ b_set = 'self.%s' % n
+ else:
+ if len(st) == 1:
+ b_set = "%s" % repr(st[0])
+ else:
+ b_set = "%s" % repr(st)
+ if len(st) == 1:
+ return '%s == %s' % (expr, b_set)
+ if full and len(st) > len(full) / 2:
+ # Reverse the sense of the test.
+ not_set = [x for x in full if x not in st]
+ return self.not_in_test(expr, full, not_set, as_set=as_set)
+ return '%s in %s' % (expr, b_set)
+
+ def not_in_test(self, expr, full, st, as_set=False):
+ """Like in_test, but the reverse test."""
+ if not st:
+ return '1'
+ if as_set:
+ n = None
+ for k, v in self.sets.items():
+ if set(v) == set(st):
+ n = k
+ if n is None:
+ n = '_' + re.sub(r'[^a-zA-Z_0-9]', '', '_'.join(set(st)))
+ if len(n) > 30:
+ n = n[:30] + '_'
+ while n in self.sets:
+ n += '_'
+ self.sets[n] = st
+ b_set = 'self.%s' % n
+ else:
+ if len(st) == 1:
+ b_set = "%s" % repr(st[0])
+ else:
+ b_set = "%s" % repr(st)
+ if len(st) == 1:
+ return '%s != %s' % (expr, b_set)
+ return '%s not in %s' % (expr, b_set)
+
+ def peek_call(self, st, as_set=False):
+ """Generate a call to scan for a token in the set 'st'"""
+ assert isinstance(st, list)
+ a_set = ''
+ if self.equal_set(st, self.non_ignored_tokens()):
+ st = None
+ if self.has_option('context_insensitive_scanner'):
+ st = None
+ if st:
+ if as_set:
+ n = None
+ for k, v in self.sets.items():
+ if set(v) == set(st):
+ n = k
+ if n is None:
+ n = '_' + re.sub(r'[^a-zA-Z_0-9]', '', '_'.join(set(st)))
+ if len(n) > 30:
+ n = n[:30] + '_'
+ while n in self.sets:
+ n += '_'
+ self.sets[n] = st
+ a_set = 'self.%s, ' % n
+ else:
+ a_set = "%s, " % repr(st)
+ return 'self._peek(%scontext=_context)' % a_set
+
+ def peek_test(self, a, b, as_set=False):
+ """Generate a call to test whether the next token (which could be any of
+ the elements in a) is in the set b."""
+ if self.subset(a, b):
+ return '1'
+ if self.has_option('context_insensitive_scanner'):
+ a = self.non_ignored_tokens()
+ return self.in_test(self.peek_call(a, as_set=as_set), a, b, as_set=as_set)
+
+ def not_peek_test(self, a, b, as_set=False):
+ """Like peek_test, but the opposite sense."""
+ if self.subset(a, b):
+ return '0'
+ return self.not_in_test(self.peek_call(a, as_set=as_set), a, b, as_set=as_set)
+
+ def calculate(self):
+ """The main loop to compute the epsilon, first, follow sets.
+ The loop continues until the sets converge. This works because
+ each set can only get larger, so when they stop getting larger,
+ we're done."""
+ # First we determine whether a rule accepts epsilon (the empty sequence)
+ while 1:
+ for r in self.goals:
+ self.rules[r].setup(self)
+ if self.change_count == 0:
+ break
+ self.change_count = 0
+
+ # Now we compute the first/follow sets
+ while 1:
+ for r in self.goals:
+ self.rules[r].update(self)
+ if self.change_count == 0:
+ break
+ self.change_count = 0
+
+ def dump_information(self):
+ """Display the grammar in somewhat human-readable form."""
+ self.calculate()
+ for r in self.goals:
+ print ' _____' + '_'*len(r)
+ print ('___/Rule '+r+'\\' + '_'*80)[:79]
+ queue = [self.rules[r]]
+ while queue:
+ top = queue[0]
+ del queue[0]
+
+ print 'Rule', repr(top), 'of class', top.__class__.__name__
+ top.first.sort()
+ top.follow.sort()
+ eps = []
+ if top.accepts_epsilon:
+ eps = ['(null)']
+ print ' FIRST:', ', '.join(top.first+eps)
+ print ' FOLLOW:', ', '.join(top.follow)
+ for x in top.get_children():
+ queue.append(x)
+
+ def repr_ignore(self):
+ out = "{"
+ for t, s in self.ignore.iteritems():
+ if s is None:
+ s = repr(s)
+ out += "%s: %s, " % (repr(t), s)
+ out += "}"
+ return out
+
+ def generate_output(self):
+ self.calculate()
+ self.write(self.preparser)
+ self.write("# Begin -- grammar generated by Yapps\n")
+ self.write("import re\n")
+ self.write("try:\n")
+ self.write(INDENT, "from yapps.runtime import Scanner, Parser, Context, wrap_error_reporter\n")
+ self.write("except ImportError:\n")
+ self.write(INDENT, "from runtime import Scanner, Parser, Context, wrap_error_reporter\n")
+ self.write("\n\n")
+ self.write("class ", self.name, "Scanner(Scanner):\n")
+ self.write(INDENT, "patterns = None\n")
+ self.write(INDENT, "_patterns = [\n")
+ for p in self.terminals:
+ self.write(INDENT*2, "(%s, %s),\n" % (
+ repr(p), repr(self.tokens[p])))
+ self.write(INDENT, "]\n\n")
+ self.write(INDENT, "def __init__(self, str, *args, **kw):\n")
+ self.write(INDENT*2, "if hasattr(self, 'setup_patterns'):\n")
+ self.write(INDENT*3, "self.setup_patterns(self._patterns)\n")
+ self.write(INDENT*2, "elif self.patterns is None:\n")
+ self.write(INDENT*3, "self.__class__.patterns = []\n")
+ self.write(INDENT*3, "for t, p in self._patterns:\n")
+ self.write(INDENT*4, "self.patterns.append((t, re.compile(p)))\n")
+ self.write(INDENT*2, "super(", self.name, "Scanner, self).__init__(None, %s, str, *args, **kw)\n" %
+ self.repr_ignore())
+ self.write("\n\n")
+
+ self.write("class ", self.name, "(Parser):\n")
+ self.write(INDENT, "Context = Context\n\n")
+ for r in self.goals:
+ self.write(INDENT, "def ", r, "(self")
+ if self.params[r]:
+ self.write(", ", self.params[r])
+ self.write(", _parent=None):\n")
+ self.write(INDENT*2, "_context = self.Context(_parent, self._scanner, %s, [%s])\n" %
+ (repr(r), self.params.get(r, '')))
+ self.rules[r].output(self, INDENT*2)
+ self.write("\n")
+
+ for n, s in self.sets.items():
+ self.write(" %s = %s\n" % (n, repr(s[0]) if len(s) == 1 else repr(set(s))))
+
+ self.write("\n\n")
+ self.write("def parse(rule, text):\n")
+ self.write(INDENT, "P = ", self.name, "(", self.name, "Scanner(text))\n")
+ self.write(INDENT, "return wrap_error_reporter(P, rule)\n")
+ self.write("\n")
+ if self.postparser is not None:
+ self.write("# End -- grammar generated by Yapps\n")
+ self.write(self.postparser)
+ else:
+ self.write("if __name__ == '__main__':\n")
+ self.write(INDENT, "from sys import argv, stdin\n")
+ self.write(INDENT, "if len(argv) >= 2:\n")
+ self.write(INDENT*2, "if len(argv) >= 3:\n")
+ self.write(INDENT*3, "f = open(argv[2], 'r')\n")
+ self.write(INDENT*2, "else:\n")
+ self.write(INDENT*3, "f = stdin\n")
+ self.write(INDENT*2, "print parse(argv[1], f.read())\n")
+ self.write(INDENT, "else:\n")
+ self.write(INDENT*2, "print >>sys.stderr, 'Args: <rule> [<filename>]'\n")
+ self.write("# End -- grammar generated by Yapps\n")
+
+
+######################################################################
+class Node:
+ """This is the base class for all components of a grammar."""
+ def __init__(self, rule):
+ self.rule = rule # name of the rule containing this node
+ self.first = []
+ self.follow = []
+ self.accepts_epsilon = 0
+
+ def setup(self, gen):
+        # setup() may flip accepts_epsilon from 0 to 1, but never back.
+        # Since each pass can only add information, the fixed-point
+        # iteration in Generator.calculate() needs only finitely many steps.
+ pass
+
+ def used(self, vars):
+ "Return two lists: one of vars used, and the other of vars assigned"
+ return vars, []
+
+ def get_children(self):
+ "Return a list of sub-nodes"
+ return []
+
+ def __repr__(self):
+ return str(self)
+
+ def update(self, gen):
+ if self.accepts_epsilon:
+ gen.add_to(self.first, self.follow)
+
+ def output(self, gen, indent):
+ "Write out code to _gen_ with _indent_:string indentation"
+ gen.write(indent, "assert 0 # Invalid parser node\n")
+
+
+class Terminal(Node):
+ """This class stores terminal nodes, which are tokens."""
+ def __init__(self, rule, token):
+ Node.__init__(self, rule)
+ self.token = token
+ self.accepts_epsilon = 0
+
+ def __str__(self):
+ return self.token
+
+ def update(self, gen):
+ Node.update(self, gen)
+ if self.first != [self.token]:
+ self.first = [self.token]
+ gen.changed()
+
+ def output(self, gen, indent):
+ gen.write(indent)
+ if re.match('[a-zA-Z_][a-zA-Z_0-9]*$', self.token):
+ gen.write(self.token, " = ")
+ gen.write("self._scan(%s, context=_context)\n" % repr(self.token))
+
+
+class Eval(Node):
+ """This class stores evaluation nodes, from {{ ... }} clauses."""
+ def __init__(self, rule, expr):
+ Node.__init__(self, rule)
+        if expr[0] == ' ' and expr[0] == expr[-1]:
+            # strip a single matching pair of surrounding spaces
+            expr = expr[1:-1]
+ self.expr = expr
+
+ def setup(self, gen):
+ Node.setup(self, gen)
+ if not self.accepts_epsilon:
+ self.accepts_epsilon = 1
+ gen.changed()
+
+ def __str__(self):
+ return '{{ %s }}' % self.expr
+
+ def output(self, gen, indent):
+ gen.write(indent, self.expr, '\n')
+
+
+class NonTerminal(Node):
+ """This class stores nonterminal nodes, which are rules with arguments."""
+ def __init__(self, rule, name, args):
+ Node.__init__(self, rule)
+ self.name = name
+ self.args = args
+
+ def setup(self, gen):
+ Node.setup(self, gen)
+ try:
+ self.target = gen.rules[self.name]
+ if self.accepts_epsilon != self.target.accepts_epsilon:
+ self.accepts_epsilon = self.target.accepts_epsilon
+ gen.changed()
+ except KeyError: # Oops, it's nonexistent
+ print >>sys.stderr, 'Error: no rule <%s>' % self.name
+ self.target = self
+
+ def __str__(self):
+ return '%s' % self.name
+
+ def update(self, gen):
+ Node.update(self, gen)
+ gen.equate(self.first, self.target.first)
+ gen.equate(self.follow, self.target.follow)
+
+ def output(self, gen, indent):
+ gen.write(indent)
+ gen.write(self.name, " = ")
+ args = self.args
+ if args:
+ args += ', '
+ args += '_context'
+ gen.write("self.", self.name, "(", args, ")\n")
+
+
+class Sequence(Node):
+ """This class stores a sequence of nodes (A B C ...)"""
+ def __init__(self, rule, *children):
+ Node.__init__(self, rule)
+ self.children = children
+
+ def setup(self, gen):
+ Node.setup(self, gen)
+ for c in self.children:
+ c.setup(gen)
+
+ if not self.accepts_epsilon:
+ # If it's not already accepting epsilon, it might now do so.
+ for c in self.children:
+ # any non-epsilon means all is non-epsilon
+ if not c.accepts_epsilon:
+ break
+ else:
+ self.accepts_epsilon = 1
+ gen.changed()
+
+ def get_children(self):
+ return self.children
+
+ def __str__(self):
+ return '( %s )' % ' '.join(map(str, self.children))
+
+ def update(self, gen):
+ Node.update(self, gen)
+ for g in self.children:
+ g.update(gen)
+
+ empty = 1
+ for g_i in range(len(self.children)):
+ g = self.children[g_i]
+
+ if empty:
+ gen.add_to(self.first, g.first)
+ if not g.accepts_epsilon:
+ empty = 0
+
+ if g_i == len(self.children)-1:
+ next = self.follow
+ else:
+ next = self.children[1+g_i].first
+ gen.add_to(g.follow, next)
+
+ if self.children:
+ gen.add_to(self.follow, self.children[-1].follow)
+
+ def output(self, gen, indent):
+ if self.children:
+ for c in self.children:
+ c.output(gen, indent)
+ else:
+ # Placeholder for empty sequences, just in case
+ gen.write(indent, 'pass\n')
+
+
+class Choice(Node):
+ """This class stores a choice between nodes (A | B | C | ...)"""
+ def __init__(self, rule, *children):
+ Node.__init__(self, rule)
+ self.children = children
+
+ def setup(self, gen):
+ Node.setup(self, gen)
+ for c in self.children:
+ c.setup(gen)
+
+ if not self.accepts_epsilon:
+ for c in self.children:
+ if c.accepts_epsilon:
+ self.accepts_epsilon = 1
+ gen.changed()
+
+ def get_children(self):
+ return self.children
+
+ def __str__(self):
+ return '( %s )' % ' | '.join(map(str, self.children))
+
+ def update(self, gen):
+ Node.update(self, gen)
+ for g in self.children:
+ g.update(gen)
+
+ for g in self.children:
+ gen.add_to(self.first, g.first)
+ gen.add_to(self.follow, g.follow)
+ for g in self.children:
+ gen.add_to(g.follow, self.follow)
+ if self.accepts_epsilon:
+ gen.add_to(self.first, self.follow)
+
+ def output(self, gen, indent):
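+        # Emits an if/elif dispatch on the next token; the generated
+        # not_expr() and u_expr() methods in scss.py below are examples
+        # of this output.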
+ test = "if"
+ gen.write(indent, "_token = ", gen.peek_call(self.first, as_set=True), "\n")
+ tokens_seen = []
+ tokens_unseen = self.first[:]
+ if gen.has_option('context_insensitive_scanner'):
+ # Context insensitive scanners can return ANY token,
+ # not only the ones in first.
+ tokens_unseen = gen.non_ignored_tokens()
+ for c in self.children:
+ testset = c.first[:]
+ removed = []
+            for x in testset[:]:  # iterate over a copy; testset is mutated below
+ if x in tokens_seen:
+ testset.remove(x)
+ removed.append(x)
+ if x in tokens_unseen:
+ tokens_unseen.remove(x)
+ tokens_seen = tokens_seen + testset
+ if removed:
+ if not testset:
+ print >>sys.stderr, 'Error in rule', self.rule + ':'
+ else:
+ print >>sys.stderr, 'Warning in rule', self.rule + ':'
+ print >>sys.stderr, ' *', self
+ print >>sys.stderr, ' * These tokens could be matched by more than one clause:'
+ print >>sys.stderr, ' *', ' '.join(removed)
+
+ if testset:
+ if not tokens_unseen: # context sensitive scanners only!
+ if test == 'if':
+ # if it's the first AND last test, then
+ # we can simply put the code without an if/else
+ c.output(gen, indent)
+ else:
+ gen.write(indent, "else:")
+ t = gen.in_test('', [], testset, as_set=True)
+ if len(t) < 70 - len(indent):
+ gen.write(' #', t)
+ gen.write("\n")
+ c.output(gen, indent + INDENT)
+ else:
+ gen.write(indent, test, " ",
+ gen.in_test('_token', tokens_unseen, testset, as_set=True),
+ ":\n")
+ c.output(gen, indent + INDENT)
+ test = "elif"
+
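+        # NOTE: the raise emitted below presumes the generated module
+        # binds the name `runtime`; the imports written by
+        # generate_output() above only import names from it.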
+ if tokens_unseen:
+ gen.write(indent, "else:\n")
+ gen.write(indent, INDENT, "raise runtime.SyntaxError(_token[0], ")
+ gen.write("'Could not match ", self.rule, "')\n")
+
+
+class Wrapper(Node):
+ """This is a base class for nodes that modify a single child."""
+ def __init__(self, rule, child):
+ Node.__init__(self, rule)
+ self.child = child
+
+ def setup(self, gen):
+ Node.setup(self, gen)
+ self.child.setup(gen)
+
+ def get_children(self):
+ return [self.child]
+
+ def update(self, gen):
+ Node.update(self, gen)
+ self.child.update(gen)
+ gen.add_to(self.first, self.child.first)
+ gen.equate(self.follow, self.child.follow)
+
+
+class Option(Wrapper):
+ """This class represents an optional clause of the form [A]"""
+ def setup(self, gen):
+ Wrapper.setup(self, gen)
+ if not self.accepts_epsilon:
+ self.accepts_epsilon = 1
+ gen.changed()
+
+ def __str__(self):
+ return '[ %s ]' % str(self.child)
+
+ def output(self, gen, indent):
+ if self.child.accepts_epsilon:
+ print >>sys.stderr, 'Warning in rule', self.rule+': contents may be empty.'
+ gen.write(indent, "if %s:\n" %
+ gen.peek_test(self.first, self.child.first, as_set=True))
+ self.child.output(gen, indent + INDENT)
+
+ if gen.has_option('context_insensitive_scanner'):
+ gen.write(indent, "if %s:\n" %
+ gen.not_peek_test(gen.non_ignored_tokens(), self.follow, as_set=True))
+ gen.write(indent + INDENT, "raise runtime.SyntaxError(pos=self._scanner.get_pos(), context=_context, msg='Need one of ' + ', '.join(%s))\n" %
+ repr(self.first))
+
+
+class Plus(Wrapper):
+ """This class represents a 1-or-more repetition clause of the form A+"""
+ def setup(self, gen):
+ Wrapper.setup(self, gen)
+ if self.accepts_epsilon != self.child.accepts_epsilon:
+ self.accepts_epsilon = self.child.accepts_epsilon
+ gen.changed()
+
+ def __str__(self):
+ return '%s+' % str(self.child)
+
+ def update(self, gen):
+ Wrapper.update(self, gen)
+        gen.add_to(self.child.follow, self.child.first)  # the child may repeat, so it can follow itself
+
+ def output(self, gen, indent):
+ if self.child.accepts_epsilon:
+ print >>sys.stderr, 'Warning in rule', self.rule+':'
+ print >>sys.stderr, ' * The repeated pattern could be empty. The resulting parser may not work properly.'
+ gen.write(indent, "while 1:\n")
+ self.child.output(gen, indent + INDENT)
+ union = self.first[:]
+ gen.add_to(union, self.follow)
+ gen.write(indent + INDENT, "if %s: break\n" %
+ gen.not_peek_test(union, self.child.first, as_set=True))
+
+ if gen.has_option('context_insensitive_scanner'):
+ gen.write(indent, "if %s:\n" %
+ gen.not_peek_test(gen.non_ignored_tokens(), self.follow, as_set=True))
+ gen.write(indent + INDENT, "raise runtime.SyntaxError(pos=self._scanner.get_pos(), context=_context, msg='Need one of ' + ', '.join(%s))\n" %
+ repr(self.first))
+
+
+class Star(Wrapper):
+ """This class represents a 0-or-more repetition clause of the form A*"""
+ def setup(self, gen):
+ Wrapper.setup(self, gen)
+ if not self.accepts_epsilon:
+ self.accepts_epsilon = 1
+ gen.changed()
+
+ def __str__(self):
+ return '%s*' % str(self.child)
+
+ def update(self, gen):
+ Wrapper.update(self, gen)
+        gen.add_to(self.child.follow, self.child.first)  # the child may repeat, so it can follow itself
+
+ def output(self, gen, indent):
+ if self.child.accepts_epsilon:
+ print >>sys.stderr, 'Warning in rule', self.rule+':'
+ print >>sys.stderr, ' * The repeated pattern could be empty. The resulting parser probably will not work properly.'
+ gen.write(indent, "while %s:\n" %
+ gen.peek_test(self.follow, self.child.first, as_set=True))
+ self.child.output(gen, indent + INDENT)
+
+ # TODO: need to generate tests like this in lots of rules
+ if gen.has_option('context_insensitive_scanner'):
+ gen.write(indent, "if %s:\n" %
+ gen.not_peek_test(gen.non_ignored_tokens(), self.follow, as_set=True))
+ gen.write(indent + INDENT, "raise runtime.SyntaxError(pos=self._scanner.get_pos(), context=_context, msg='Need one of ' + ', '.join(%s))\n" %
+ repr(self.first))
diff --git a/scss/src/yapps/runtime.py b/scss/src/yapps/runtime.py
new file mode 100644
index 0000000..e23210a
--- /dev/null
+++ b/scss/src/yapps/runtime.py
@@ -0,0 +1,443 @@
+# Yapps 2 Runtime, part of Yapps 2 - yet another python parser system
+# Copyright 1999-2003 by Amit J. Patel <amitp@cs.stanford.edu>
+# Enhancements copyright 2003-2004 by Matthias Urlichs <smurf@debian.org>
+#
+# This version of the Yapps 2 Runtime can be distributed under the
+# terms of the MIT open source license, either found in the LICENSE file
+# included with the Yapps distribution
+# <http://theory.stanford.edu/~amitp/yapps/> or at
+# <http://www.opensource.org/licenses/mit-license.php>
+#
+
+"""Run time libraries needed to run parsers generated by Yapps.
+
+This module defines parse-time exception classes, a scanner class, a
+base class for parsers produced by Yapps, and a context class that
+keeps track of the parse stack.
+
+"""
+
+import re
+import sys
+
+DEBUG = False
+MIN_WINDOW = 4096  # minimum lookahead kept in the input buffer when reading from a file
+
+
+class SyntaxError(Exception):
+ """When we run into an unexpected token, this is the exception to use"""
+ def __init__(self, pos=None, msg="Bad Token", context=None):
+ Exception.__init__(self)
+ self.pos = pos
+ self.msg = msg
+ self.context = context
+
+ def __str__(self):
+ if not self.pos:
+ return 'SyntaxError'
+ else:
+ return 'SyntaxError@%s(%s)' % (repr(self.pos), self.msg)
+
+
+class NoMoreTokens(Exception):
+ """Another exception object, for when we run out of tokens"""
+ pass
+
+
+class Token(object):
+ """Yapps token.
+
+ This is a container for a scanned token.
+ """
+
+ def __init__(self, type, value, pos=None):
+ """Initialize a token."""
+ self.type = type
+ self.value = value
+ self.pos = pos
+
+ def __repr__(self):
+ output = '<%s: %s' % (self.type, repr(self.value))
+ if self.pos:
+ output += " @ "
+ if self.pos[0]:
+ output += "%s:" % self.pos[0]
+ if self.pos[1]:
+ output += "%d" % self.pos[1]
+ if self.pos[2] is not None:
+ output += ".%d" % self.pos[2]
+ output += ">"
+ return output
+
+
+in_name = 0  # counter used to synthesize filenames for unnamed input buffers
+
+
+class Scanner(object):
+ """Yapps scanner.
+
+ The Yapps scanner can work in context sensitive or context
+ insensitive modes. The token(i) method is used to retrieve the
+ i-th token. It takes a restrict set that limits the set of tokens
+ it is allowed to return. In context sensitive mode, this restrict
+ set guides the scanner. In context insensitive mode, there is no
+ restriction (the set is always the full set of tokens).
+
+ """
+
+ def __init__(self, patterns, ignore, input="",
+ file=None, filename=None, stacked=False):
+ """Initialize the scanner.
+
+ Parameters:
+ patterns : [(terminal, uncompiled regex), ...] or None
+ ignore : {terminal:None, ...}
+ input : string
+
+ If patterns is None, we assume that the subclass has
+ defined self.patterns : [(terminal, compiled regex), ...].
+ Note that the patterns parameter expects uncompiled regexes,
+ whereas the self.patterns field expects compiled regexes.
+
+        Each value in the 'ignore' dict is either None or a callable,
+        which is called with the scanner and the to-be-ignored match
+        object; this can be used for include-file or comment handling.
+ """
+
+ if not filename:
+ global in_name
+ filename = "<f.%d>" % in_name
+ in_name += 1
+
+ self.reset(input, file, filename)
+ self.ignore = ignore
+ self.stacked = stacked
+
+ if patterns is not None:
+ # Compile the regex strings into regex objects
+ self.patterns = []
+ for terminal, regex in patterns:
+ self.patterns.append((terminal, re.compile(regex)))
+
+ def reset(self, input="", file=None, filename=None):
+ self.restrictions = []
+ self.input = input
+ self.file = file
+ self.filename = filename
+ self.pos = 0
+ self.del_pos = 0 # skipped
+ self.line = 1
+ self.del_line = 0 # skipped
+ self.col = 0
+ self.tokens = []
+
+ self.last_read_token = None
+ self.last_token = None
+ self.last_types = None
+
+ def __repr__(self):
+ """
+        Return a string showing the last 10 tokens that have been scanned.
+ """
+ output = ''
+ for t in self.tokens[-10:]:
+ output = "%s\n (@%s) %s = %s" % (output, t[0], t[2], repr(t[3]))
+ return output
+
+ def get_pos(self):
+ """Return a file/line/char tuple."""
+ return (self.filename, self.line + self.del_line, self.col)
+
+ def print_line_with_pointer(self, pos, length=0, out=sys.stderr):
+ """Print the line of 'text' that includes position 'p',
+ along with a second line with a single caret (^) at position p"""
+
+ file, line, p = pos
+ if file != self.filename:
+ print >>out, "(%s: not in input buffer)" % file
+ return
+
+ text = self.input
+ p += length - 1 # starts at pos 1
+
+ origline = line
+ line -= self.del_line
+ spos = 0
+ if line > 0:
+ while 1:
+ line = line - 1
+ try:
+ cr = text.index("\n", spos)
+ except ValueError:
+ if line:
+ text = ""
+ break
+ if line == 0:
+ text = text[spos:cr]
+ break
+ spos = cr+1
+ else:
+ print >>out, "(%s:%d not in input buffer)" % (file, origline)
+ return
+
+ # Now try printing part of the line
+ text = text[max(p - 80, 0):p + 80]
+ p = p - max(p - 80, 0)
+
+ # Strip to the left
+ i = text[:p].rfind('\n')
+ j = text[:p].rfind('\r')
+ if i < 0 or (0 <= j < i):
+ i = j
+ if 0 <= i < p:
+ p = p - i - 1
+ text = text[i+1:]
+
+ # Strip to the right
+ i = text.find('\n', p)
+ j = text.find('\r', p)
+ if i < 0 or (0 <= j < i):
+ i = j
+ if i >= 0:
+ text = text[:i]
+
+ # Now shorten the text
+ while len(text) > 70 and p > 60:
+ # Cut off 10 chars
+ text = "..." + text[10:]
+ p = p - 7
+
+ # Now print the string, along with an indicator
+ print >>out, '> ', text
+ print >>out, '> ', ' ' * p + '^'
+
+ def grab_input(self):
+ """Get more input if possible."""
+ if not self.file:
+ return
+ if len(self.input) - self.pos >= MIN_WINDOW:
+ return
+
+ data = self.file.read(MIN_WINDOW)
+ if data is None or data == "":
+ self.file = None
+
+ # Drop bytes from the start, if necessary.
+ if self.pos > 2 * MIN_WINDOW:
+ self.del_pos += MIN_WINDOW
+ self.del_line += self.input[:MIN_WINDOW].count("\n")
+ self.pos -= MIN_WINDOW
+ self.input = self.input[MIN_WINDOW:] + data
+ else:
+ self.input = self.input + data
+
+ def getchar(self):
+ """Return the next character."""
+ self.grab_input()
+
+ c = self.input[self.pos]
+ self.pos += 1
+ return c
+
+ def _scan(self, restrict, context=None):
+ """
+        Scan the next token and append it to self.tokens, recording the
+        restriction set in self.restrictions. Tokens whose type is in
+        self.ignore are skipped. Returns 1 if a new token was added, or
+        0 if the token merely repeated the last one read.
+ """
+ # Keep looking for a token, ignoring any in self.ignore
+ while True:
+ tok = None
+
+ self.grab_input()
+
+ # special handling for end-of-file
+ if self.stacked and self.pos == len(self.input):
+ raise StopIteration
+
+ # Search the patterns for the longest match, with earlier
+ # tokens in the list having preference
+ best_match = -1
+ best_pat = None
+ best_m = None
+ for tok, regex in self.patterns:
+ if DEBUG:
+ print("\tTrying %s: %s at pos %d -> %s" % (repr(tok), repr(regex.pattern), self.pos, repr(self.input)))
+ # First check to see if we're ignoring this token
+ if restrict and tok not in restrict and tok not in self.ignore:
+ if DEBUG:
+ print "\tSkipping %s!" % repr(tok)
+ continue
+ m = regex.match(self.input, self.pos)
+ if m and m.end() - m.start() > best_match:
+ # We got a match that's better than the previous one
+ best_pat = tok
+ best_match = m.end() - m.start()
+ best_m = m
+ if DEBUG:
+ print("Match OK! %s: %s at pos %d" % (repr(tok), repr(regex.pattern), self.pos))
+
+ # If we didn't find anything, raise an error
+ if best_pat is None or best_match < 0:
+ msg = "Bad token: %s" % ("???" if tok is None else repr(tok),)
+ if restrict:
+ msg = "%s found while trying to find one of the restricted tokens: %s" % ("???" if tok is None else repr(tok), ", ".join(repr(r) for r in restrict))
+ raise SyntaxError(self.get_pos(), msg, context=context)
+
+ ignore = best_pat in self.ignore
+ end_pos = self.pos + best_match
+ value = self.input[self.pos:end_pos]
+ if not ignore:
+ # token = Token(type=best_pat, value=value, pos=self.get_pos())
+ token = (
+ self.pos,
+ end_pos,
+ best_pat,
+ value,
+ )
+ self.pos = end_pos
+
+ npos = value.rfind("\n")
+ if npos > -1:
+ self.col = best_match - npos
+ self.line += value.count('\n')
+ else:
+ self.col += best_match
+
+ # If we found something that isn't to be ignored, return it
+ if not ignore:
+ # print repr(token)
+ if not self.tokens or token != self.last_read_token:
+ # Only add this token if it's not in the list
+ # (to prevent looping)
+ self.last_read_token = token
+ self.tokens.append(token)
+ self.restrictions.append(restrict)
+ return 1
+ return 0
+ else:
+ ignore = self.ignore[best_pat]
+ if ignore:
+ ignore(self, best_m)
+
+ def token(self, i, restrict=None, **kwargs):
+ """
+        Get the i'th token, and if i is one past the end, scan for
+        another one; `restrict` is a collection of allowed token types,
+        or None for any token.
+ """
+ context = kwargs.get("context")
+ tokens_len = len(self.tokens)
+ if i == tokens_len: # We are at the end, get the next...
+ tokens_len += self._scan(restrict, context)
+ elif i >= 0 and i < tokens_len:
+            # a restriction that is a strict superset of the one recorded
+            # at scan time would require rescanning, which is unimplemented
+            if restrict and self.restrictions[i] and restrict > self.restrictions[i]:
+ raise NotImplementedError("Unimplemented: restriction set changed")
+ if i >= 0 and i < tokens_len:
+ return self.tokens[i]
+ raise NoMoreTokens
+
+
+class Parser(object):
+ """Base class for Yapps-generated parsers.
+
+ """
+
+ def __init__(self, scanner):
+ self._scanner = scanner
+ self._pos = 0
+
+ def reset(self, input):
+ self._scanner.reset(input)
+ self._pos = 0
+
+ def _peek(self, types, **kwargs):
+ """Returns the token type for lookahead; if there are any args
+ then the list of args is the set of token types to allow"""
+ try:
+ tok = self._scanner.token(self._pos, types)
+ return tok[2]
+ except SyntaxError:
+ return None
+
+ def _scan(self, type, **kwargs):
+ """Returns the matched text, and moves to the next token"""
+ tok = self._scanner.token(self._pos, set([type]))
+ if tok[2] != type:
+ raise SyntaxError("SyntaxError[@ char %s: %s]" % (repr(tok[0]), "Trying to find " + type))
+ self._pos += 1
+ return tok[3]
+
+ def _rewind(self, n=1):
+ self._pos -= min(n, self._pos)
+        self._scanner.rewind(self._pos)  # NOTE: this trimmed Scanner defines no rewind()
+
+
+class Context(object):
+ """Class to represent the parser's call stack.
+
+ Every rule creates a Context that links to its parent rule. The
+ contexts can be used for debugging.
+
+ """
+
+ def __init__(self, parent, scanner, rule, args=()):
+ """Create a new context.
+
+ Args:
+ parent: Context object or None
+ scanner: Scanner object
+ rule: string (name of the rule)
+ args: tuple listing parameters to the rule
+
+ """
+ self.parent = parent
+ self.scanner = scanner
+ self.rule = rule
+ self.args = args
+ self.token = scanner.last_read_token
+
+ def __str__(self):
+ output = ''
+ if self.parent:
+ output = str(self.parent) + ' > '
+ output += self.rule
+ return output
+
+
+def print_error(err, scanner, max_ctx=None):
+ """Print error messages, the parser stack, and the input text -- for human-readable error messages."""
+ # NOTE: this function assumes 80 columns :-(
+ # Figure out the line number
+ pos = err.pos
+ if not pos:
+ pos = scanner.get_pos()
+
+ file_name, line_number, column_number = pos
+ print >>sys.stderr, '%s:%d:%d: %s' % (file_name, line_number, column_number, err.msg)
+
+ scanner.print_line_with_pointer(pos)
+
+ context = err.context
+ token = None
+ while context:
+ print >>sys.stderr, 'while parsing %s%s:' % (context.rule, tuple(context.args))
+ if context.token:
+ token = context.token
+ if token:
+ scanner.print_line_with_pointer(token.pos, length=len(token.value))
+ context = context.parent
+ if max_ctx:
+ max_ctx = max_ctx-1
+ if not max_ctx:
+ break
+
+
+def wrap_error_reporter(parser, rule, *args, **kwargs):
+ try:
+ return getattr(parser, rule)(*args, **kwargs)
+ except SyntaxError, e:
+ print_error(e, parser._scanner)
+ except NoMoreTokens:
+ print >>sys.stderr, 'Could not complete parsing; stopped around here:'
+ print >>sys.stderr, parser._scanner
diff --git a/scss/src/grammar/scss.g b/scss/src/yapps/scss.g
index ac4d2c1..ac4d2c1 100644
--- a/scss/src/grammar/scss.g
+++ b/scss/src/yapps/scss.g
diff --git a/scss/src/yapps/scss.py b/scss/src/yapps/scss.py
new file mode 100644
index 0000000..166ecd3
--- /dev/null
+++ b/scss/src/yapps/scss.py
@@ -0,0 +1,299 @@
+# python yapps2.py grammar.g grammar.py
+
+################################################################################
+## Grammar compiled using Yapps:
+
+# Begin -- grammar generated by Yapps
+import re
+try:
+ from yapps.runtime import Scanner, Parser, Context, wrap_error_reporter
+except ImportError:
+ from runtime import Scanner, Parser, Context, wrap_error_reporter
+
+
+class SassExpressionScanner(Scanner):
+ patterns = None
+ _patterns = [
+ ('":"', ':'),
+ ('[ \r\t\n]+', '[ \r\t\n]+'),
+ ('COMMA', ','),
+ ('LPAR', '\\(|\\['),
+ ('RPAR', '\\)|\\]'),
+ ('END', '$'),
+ ('MUL', '[*]'),
+ ('DIV', '/'),
+ ('ADD', '[+]'),
+ ('SUB', '-\\s'),
+ ('SIGN', '-(?![a-zA-Z_])'),
+ ('AND', '(?<![-\\w])and(?![-\\w])'),
+ ('OR', '(?<![-\\w])or(?![-\\w])'),
+ ('NOT', '(?<![-\\w])not(?![-\\w])'),
+ ('NE', '!='),
+ ('INV', '!'),
+ ('EQ', '=='),
+ ('LE', '<='),
+ ('GE', '>='),
+ ('LT', '<'),
+ ('GT', '>'),
+ ('STR', "'[^']*'"),
+ ('QSTR', '"[^"]*"'),
+ ('UNITS', '(?<!\\s)(?:[a-zA-Z]+|%)(?![-\\w])'),
+ ('NUM', '(?:\\d+(?:\\.\\d*)?|\\.\\d+)'),
+ ('COLOR', '#(?:[a-fA-F0-9]{6}|[a-fA-F0-9]{3})(?![a-fA-F0-9])'),
+ ('VAR', '\\$[-a-zA-Z0-9_]+'),
+ ('NAME', '\\$?[-a-zA-Z0-9_]+'),
+ ('FNCT', '[-a-zA-Z_][-a-zA-Z0-9_]*(?=\\()'),
+ ('ID', '!?[-a-zA-Z_][-a-zA-Z0-9_]*'),
+ ]
+
+ def __init__(self, str, *args, **kw):
+ if hasattr(self, 'setup_patterns'):
+ self.setup_patterns(self._patterns)
+ elif self.patterns is None:
+ self.__class__.patterns = []
+ for t, p in self._patterns:
+ self.patterns.append((t, re.compile(p)))
+ super(SassExpressionScanner, self).__init__(None, {'[ \r\t\n]+': None, }, str, *args, **kw)
+
+
+class SassExpression(Parser):
+ Context = Context
+
+ def goal(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'goal', [])
+ expr_lst = self.expr_lst(_context)
+ END = self._scan('END', context=_context)
+ return expr_lst
+
+ def expr_lst(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'expr_lst', [])
+ expr_item = self.expr_item(_context)
+ v = [expr_item]
+ while self._peek(self._END_COMMA_RPAR, context=_context) == self._COMMA:
+ COMMA = self._scan('COMMA', context=_context)
+ expr_item = (None, Literal(Undefined()))
+ if self._peek(self._LPAR_END_NAME_COLOR_QSTR_SIGN_, context=_context) not in self._END_COMMA_RPAR:
+ expr_item = self.expr_item(_context)
+ v.append(expr_item)
+ return ListLiteral(v) if len(v) > 1 else v[0][1]
+
+ def expr_item(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'expr_item', [])
+ NAME = None
+ if self._peek(self._LPAR_NAME_COLOR_QSTR_SIGN_VAR_, context=_context) == self._NAME:
+ NAME = self._scan('NAME', context=_context)
+ self._scan('":"', context=_context)
+ expr_slst = self.expr_slst(_context)
+ return (NAME, expr_slst)
+
+ def expr_slst(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'expr_slst', [])
+ or_expr = self.or_expr(_context)
+ v = [(None, or_expr)]
+ while self._peek(self._LPAR_END_COLOR_QSTR_SIGN_VAR__, context=_context) not in self._END_COMMA_RPAR:
+ or_expr = self.or_expr(_context)
+ v.append((None, or_expr))
+ return ListLiteral(v, comma=False) if len(v) > 1 else v[0][1]
+
+ def or_expr(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'or_expr', [])
+ and_expr = self.and_expr(_context)
+ v = and_expr
+ while self._peek(self._LPAR_RPAR_END_COLOR_QSTR_ID_V_, context=_context) == self._OR:
+ OR = self._scan('OR', context=_context)
+ and_expr = self.and_expr(_context)
+ v = AnyOp(v, and_expr)
+ return v
+
+ def and_expr(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'and_expr', [])
+ not_expr = self.not_expr(_context)
+ v = not_expr
+ while self._peek(self._AND_LPAR_END_COLOR_QSTR_SIGN__, context=_context) == self._AND:
+ AND = self._scan('AND', context=_context)
+ not_expr = self.not_expr(_context)
+ v = AllOp(v, not_expr)
+ return v
+
+ def not_expr(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'not_expr', [])
+ _token = self._peek(self._LPAR_COLOR_QSTR_SIGN_VAR_ADD__, context=_context)
+ if _token != self._NOT:
+ comparison = self.comparison(_context)
+ return comparison
+ else: # == self._NOT
+ NOT = self._scan('NOT', context=_context)
+ not_expr = self.not_expr(_context)
+ return NotOp(not_expr)
+
+ def comparison(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'comparison', [])
+ a_expr = self.a_expr(_context)
+ v = a_expr
+ while self._peek(self._LPAR_QSTR_RPAR_LE_COLOR_NE_LT_, context=_context) in self._GT_GE_NE_LT_LE_EQ:
+ _token = self._peek(self._GT_GE_NE_LT_LE_EQ, context=_context)
+ if _token == self._LT:
+ LT = self._scan('LT', context=_context)
+ a_expr = self.a_expr(_context)
+ v = BinaryOp(operator.lt, v, a_expr)
+ elif _token == self._GT:
+ GT = self._scan('GT', context=_context)
+ a_expr = self.a_expr(_context)
+ v = BinaryOp(operator.gt, v, a_expr)
+ elif _token == self._LE:
+ LE = self._scan('LE', context=_context)
+ a_expr = self.a_expr(_context)
+ v = BinaryOp(operator.le, v, a_expr)
+ elif _token == self._GE:
+ GE = self._scan('GE', context=_context)
+ a_expr = self.a_expr(_context)
+ v = BinaryOp(operator.ge, v, a_expr)
+ elif _token == self._EQ:
+ EQ = self._scan('EQ', context=_context)
+ a_expr = self.a_expr(_context)
+ v = BinaryOp(operator.eq, v, a_expr)
+ else: # == self._NE
+ NE = self._scan('NE', context=_context)
+ a_expr = self.a_expr(_context)
+ v = BinaryOp(operator.ne, v, a_expr)
+ return v
+
+ def a_expr(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'a_expr', [])
+ m_expr = self.m_expr(_context)
+ v = m_expr
+ while self._peek(self._LPAR_SUB_QSTR_RPAR_LE_COLOR_N_, context=_context) in self._ADD_SUB:
+ _token = self._peek(self._ADD_SUB, context=_context)
+ if _token == self._ADD:
+ ADD = self._scan('ADD', context=_context)
+ m_expr = self.m_expr(_context)
+ v = BinaryOp(operator.add, v, m_expr)
+ else: # == self._SUB
+ SUB = self._scan('SUB', context=_context)
+ m_expr = self.m_expr(_context)
+ v = BinaryOp(operator.sub, v, m_expr)
+ return v
+
+ def m_expr(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'm_expr', [])
+ u_expr = self.u_expr(_context)
+ v = u_expr
+ while self._peek(self._LPAR_SUB_QSTR_RPAR_MUL_DIV_LE_, context=_context) in self._MUL_DIV:
+ _token = self._peek(self._MUL_DIV, context=_context)
+ if _token == self._MUL:
+ MUL = self._scan('MUL', context=_context)
+ u_expr = self.u_expr(_context)
+ v = BinaryOp(operator.mul, v, u_expr)
+ else: # == self._DIV
+ DIV = self._scan('DIV', context=_context)
+ u_expr = self.u_expr(_context)
+ v = BinaryOp(operator.truediv, v, u_expr)
+ return v
+
+ def u_expr(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'u_expr', [])
+ _token = self._peek(self._LPAR_COLOR_QSTR_SIGN_ADD_NUM__, context=_context)
+ if _token == self._SIGN:
+ SIGN = self._scan('SIGN', context=_context)
+ u_expr = self.u_expr(_context)
+ return UnaryOp(operator.neg, u_expr)
+ elif _token == self._ADD:
+ ADD = self._scan('ADD', context=_context)
+ u_expr = self.u_expr(_context)
+ return UnaryOp(operator.pos, u_expr)
+ else: # in self._LPAR_COLOR_QSTR_NUM_FNCT_STR__
+ atom = self.atom(_context)
+ return atom
+
+ def atom(self, _parent=None):
+ _context = self.Context(_parent, self._scanner, 'atom', [])
+ _token = self._peek(self._LPAR_COLOR_QSTR_NUM_FNCT_STR__, context=_context)
+ if _token == self._ID:
+ ID = self._scan('ID', context=_context)
+ return Literal(parse_bareword(ID))
+ elif _token == self._LPAR:
+ LPAR = self._scan('LPAR', context=_context)
+ expr_lst = ListLiteral()
+ if self._peek(self._LPAR_END_NAME_COLOR_QSTR_SIGN__, context=_context) not in self._END_RPAR:
+ expr_lst = self.expr_lst(_context)
+ RPAR = self._scan('RPAR', context=_context)
+ return Parentheses(expr_lst)
+ elif _token == self._FNCT:
+ FNCT = self._scan('FNCT', context=_context)
+ LPAR = self._scan('LPAR', context=_context)
+ expr_lst = ListLiteral()
+ if self._peek(self._LPAR_END_NAME_COLOR_QSTR_SIGN__, context=_context) not in self._END_RPAR:
+ expr_lst = self.expr_lst(_context)
+ RPAR = self._scan('RPAR', context=_context)
+ return CallOp(FNCT, expr_lst)
+ elif _token == self._NUM:
+ NUM = self._scan('NUM', context=_context)
+ UNITS = None
+ if self._peek(self._LPAR_SUB_QSTR_RPAR_VAR_MUL_DI_, context=_context) == self._UNITS:
+ UNITS = self._scan('UNITS', context=_context)
+ return Literal(NumberValue(float(NUM), unit=UNITS))
+ elif _token == self._STR:
+ STR = self._scan('STR', context=_context)
+ return Literal(String(STR[1:-1], quotes="'"))
+ elif _token == self._QSTR:
+ QSTR = self._scan('QSTR', context=_context)
+ return Literal(String(QSTR[1:-1], quotes='"'))
+ elif _token == self._COLOR:
+ COLOR = self._scan('COLOR', context=_context)
+ return Literal(ColorValue(ParserValue(COLOR)))
+ else: # == self._VAR
+ VAR = self._scan('VAR', context=_context)
+ return Variable(VAR)
+
+ _COLOR = 'COLOR'
+ _GE = 'GE'
+ _FNCT = 'FNCT'
+ _AND = 'AND'
+ _ADD_SUB = set(['ADD', 'SUB'])
+ _DIV = 'DIV'
+ _COMMA = 'COMMA'
+ _LPAR_COLOR_QSTR_NUM_FNCT_STR__ = set(['LPAR', 'COLOR', 'QSTR', 'NUM', 'FNCT', 'STR', 'VAR', 'ID'])
+ _GT = 'GT'
+ _EQ = 'EQ'
+ _GT_GE_NE_LT_LE_EQ = set(['GT', 'GE', 'NE', 'LT', 'LE', 'EQ'])
+ _END_COMMA_RPAR = set(['END', 'COMMA', 'RPAR'])
+ _LPAR_NAME_COLOR_QSTR_SIGN_VAR_ = set(['LPAR', 'NAME', 'COLOR', 'QSTR', 'SIGN', 'VAR', 'ADD', 'NUM', 'FNCT', 'STR', 'NOT', 'ID'])
+ _LT = 'LT'
+ _LPAR_SUB_QSTR_RPAR_LE_COLOR_N_ = set(['LPAR', 'SUB', 'QSTR', 'RPAR', 'LE', 'COLOR', 'NE', 'LT', 'NUM', 'COMMA', 'GT', 'END', 'SIGN', 'GE', 'FNCT', 'STR', 'VAR', 'EQ', 'ID', 'AND', 'ADD', 'NOT', 'OR'])
+ _MUL = 'MUL'
+ _STR = 'STR'
+ _NAME = 'NAME'
+ _LPAR_END_NAME_COLOR_QSTR_SIGN__ = set(['LPAR', 'END', 'NAME', 'COLOR', 'QSTR', 'SIGN', 'VAR', 'ADD', 'NUM', 'FNCT', 'STR', 'NOT', 'RPAR', 'ID'])
+ _LE = 'LE'
+ _VAR = 'VAR'
+ _NE = 'NE'
+ _SIGN = 'SIGN'
+ _QSTR = 'QSTR'
+ _LPAR_SUB_QSTR_RPAR_MUL_DIV_LE_ = set(['LPAR', 'SUB', 'QSTR', 'RPAR', 'MUL', 'DIV', 'LE', 'COLOR', 'NE', 'LT', 'NUM', 'COMMA', 'GT', 'END', 'SIGN', 'GE', 'FNCT', 'STR', 'VAR', 'EQ', 'ID', 'AND', 'ADD', 'NOT', 'OR'])
+ _UNITS = 'UNITS'
+ _LPAR_RPAR_END_COLOR_QSTR_ID_V_ = set(['LPAR', 'RPAR', 'END', 'COLOR', 'QSTR', 'ID', 'VAR', 'ADD', 'NUM', 'COMMA', 'FNCT', 'STR', 'NOT', 'SIGN', 'OR'])
+ _NUM = 'NUM'
+ _LPAR_QSTR_RPAR_LE_COLOR_NE_LT_ = set(['LPAR', 'QSTR', 'RPAR', 'LE', 'COLOR', 'NE', 'LT', 'NUM', 'COMMA', 'GT', 'END', 'SIGN', 'ADD', 'FNCT', 'STR', 'VAR', 'EQ', 'ID', 'AND', 'GE', 'NOT', 'OR'])
+ _END_RPAR = set(['END', 'RPAR'])
+ _LPAR_SUB_QSTR_RPAR_VAR_MUL_DI_ = set(['LPAR', 'SUB', 'QSTR', 'RPAR', 'VAR', 'MUL', 'DIV', 'LE', 'COLOR', 'NE', 'LT', 'NUM', 'COMMA', 'GT', 'END', 'SIGN', 'GE', 'FNCT', 'STR', 'UNITS', 'EQ', 'ID', 'AND', 'ADD', 'NOT', 'OR'])
+ _AND_LPAR_END_COLOR_QSTR_SIGN__ = set(['AND', 'LPAR', 'END', 'COLOR', 'QSTR', 'SIGN', 'VAR', 'ADD', 'NUM', 'COMMA', 'FNCT', 'STR', 'NOT', 'ID', 'RPAR', 'OR'])
+ _LPAR_END_NAME_COLOR_QSTR_SIGN_ = set(['LPAR', 'END', 'NAME', 'COLOR', 'QSTR', 'SIGN', 'VAR', 'ADD', 'NUM', 'COMMA', 'FNCT', 'STR', 'NOT', 'RPAR', 'ID'])
+ _LPAR_COLOR_QSTR_SIGN_VAR_ADD__ = set(['LPAR', 'COLOR', 'QSTR', 'SIGN', 'VAR', 'ADD', 'NUM', 'FNCT', 'STR', 'NOT', 'ID'])
+ _MUL_DIV = set(['MUL', 'DIV'])
+ _ID = 'ID'
+ _OR = 'OR'
+ _LPAR_END_COLOR_QSTR_SIGN_VAR__ = set(['LPAR', 'END', 'COLOR', 'QSTR', 'SIGN', 'VAR', 'ADD', 'NUM', 'COMMA', 'FNCT', 'STR', 'NOT', 'RPAR', 'ID'])
+ _LPAR = 'LPAR'
+ _ADD = 'ADD'
+ _NOT = 'NOT'
+ _LPAR_COLOR_QSTR_SIGN_ADD_NUM__ = set(['LPAR', 'COLOR', 'QSTR', 'SIGN', 'ADD', 'NUM', 'FNCT', 'STR', 'VAR', 'ID'])
+ _SUB = 'SUB'
+
+
+def parse(rule, text):
+ P = SassExpression(SassExpressionScanner(text))
+ return wrap_error_reporter(P, rule)
+
+# End -- grammar generated by Yapps
+### Grammar ends.
+################################################################################
diff --git a/scss/src/yapps/yapps_grammar.g b/scss/src/yapps/yapps_grammar.g
new file mode 100644
index 0000000..b2ee047
--- /dev/null
+++ b/scss/src/yapps/yapps_grammar.g
@@ -0,0 +1,133 @@
+# grammar.py, part of Yapps 2 - yet another python parser system
+# Copyright 1999-2003 by Amit J. Patel <amitp@cs.stanford.edu>
+# Enhancements copyright 2003-2004 by Matthias Urlichs <smurf@debian.org>
+#
+# This version of the Yapps 2 grammar can be distributed under the
+# terms of the MIT open source license, either found in the LICENSE
+# file included with the Yapps distribution
+# <http://theory.stanford.edu/~amitp/yapps/> or at
+# <http://www.opensource.org/licenses/mit-license.php>
+#
+
+"""Parser for Yapps grammars.
+
+This file defines the grammar of Yapps grammars. Naturally, it is
+implemented in Yapps. The grammar.py module needed by Yapps is built
+by running Yapps on yapps_grammar.g. (Holy circularity, Batman!)
+
+"""
+
+try:
+ from yapps import parsetree
+except ImportError:
+ import parsetree
+
+
+######################################################################
+def cleanup_choice(rule, lst):
+ if len(lst) == 0:
+ return parsetree.Sequence(rule, [])
+ if len(lst) == 1:
+ return lst[0]
+ return parsetree.Choice(rule, *tuple(lst))
+
+
+def cleanup_sequence(rule, lst):
+ if len(lst) == 1:
+ return lst[0]
+ return parsetree.Sequence(rule, *tuple(lst))
+
+
+def resolve_name(rule, tokens, id, args):
+ if id in [x[0] for x in tokens]:
+ # It's a token
+ if args:
+ print 'Warning: ignoring parameters on TOKEN %s<<%s>>' % (id, args)
+ return parsetree.Terminal(rule, id)
+ else:
+ # It's a name, so assume it's a nonterminal
+ return parsetree.NonTerminal(rule, id, args)
+
+%%
+parser ParserDescription:
+
+ ignore: "[ \t\r\n]+"
+ ignore: "#.*?\r?\n"
+ token EOF: "$"
+ token ATTR: "<<.+?>>"
+ token STMT: "{{.+?}}"
+ token ID: '[a-zA-Z_][a-zA-Z_0-9]*'
+ token STR: '[rR]?\'([^\\n\'\\\\]|\\\\.)*\'|[rR]?"([^\\n"\\\\]|\\\\.)*"'
+ token LP: '\\('
+ token RP: '\\)'
+ token LB: '\\['
+ token RB: '\\]'
+ token OR: '[|]'
+ token STAR: '[*]'
+ token PLUS: '[+]'
+ token QUEST: '[?]'
+ token COLON: ':'
+
+ rule Parser: "parser" ID ":"
+ Options
+ Tokens
+ Rules<<Tokens>>
+ EOF
+ {{ return parsetree.Generator(ID, Options, Tokens, Rules) }}
+
+ rule Options: {{ opt = {} }}
+ ( "option" ":" Str {{ opt[Str] = 1 }} )*
+ {{ return opt }}
+
+ rule Tokens: {{ tok = [] }}
+ (
+ "token" ID
+ ":" Str {{ tid = (ID, Str) }}
+ ( STMT {{ tid += (STMT[2:-2],) }} )?
+ {{ tok.append(tid) }}
+ | "ignore"
+ ":" Str {{ ign = ('#ignore', Str) }}
+ ( STMT {{ ign += (STMT[2:-2],) }} )?
+ {{ tok.append(ign) }}
+ )*
+ {{ return tok }}
+
+ rule Rules<<tokens>>:
+ {{ rul = [] }}
+ (
+ "rule" ID OptParam ":" ClauseA<<ID, tokens>>
+ {{ rul.append((ID, OptParam, ClauseA)) }}
+ )*
+ {{ return rul }}
+
+ rule ClauseA<<rule, tokens>>:
+ ClauseB<<rule, tokens>>
+ {{ v = [ClauseB] }}
+ ( OR ClauseB<<rule, tokens>> {{ v.append(ClauseB) }} )*
+ {{ return cleanup_choice(rule, v) }}
+
+ rule ClauseB<<rule, tokens>>:
+ {{ v = [] }}
+ ( ClauseC<<rule, tokens>> {{ v.append(ClauseC) }} )*
+ {{ return cleanup_sequence(rule, v) }}
+
+ rule ClauseC<<rule, tokens>>:
+ ClauseD<<rule, tokens>>
+ ( PLUS {{ return parsetree.Plus(rule, ClauseD) }}
+ | STAR {{ return parsetree.Star(rule, ClauseD) }}
+ | QUEST {{ return parsetree.Option(rule, ClauseD) }}
+ | {{ return ClauseD }} )
+
+ rule ClauseD<<rule, tokens>>:
+ STR {{ t = (STR, eval(STR, {}, {})) }}
+ {{ if t not in tokens: }}
+ {{ tokens.insert(0, t) }}
+ {{ return parsetree.Terminal(rule, STR) }}
+ | ID OptParam {{ return resolve_name(rule, tokens, ID, OptParam) }}
+ | LP ClauseA<<rule, tokens>> RP {{ return ClauseA }}
+ | LB ClauseA<<rule, tokens>> RB {{ return parsetree.Option(rule, ClauseA) }}
+ | STMT {{ return parsetree.Eval(rule, STMT[2:-2]) }}
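+    # For example, a clause like  A (B | C)* [D]  is assembled by the
+    # rules above into  Sequence(A, Star(Choice(B, C)), Option(D))
+    # (a sketch; each constructor also receives the enclosing rule name).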
+
+ rule OptParam: [ ATTR {{ return ATTR[2:-2] }} ] {{ return '' }}
+ rule Str: STR {{ return eval(STR, {}, {}) }}
+%%