blob: 7a709f2fc4754413ac41c1ef2c2349b01aee573c (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
|
#
# Tokens
#
# Any single character can be a literal
#
# Root lexical region. Whitespace is skipped, identifiers are matched,
# and an opening paren fires a token-generation block that consumes a
# whole nested comment directly from the input stream.
lex start
{
# Ignore whitespace.
ignore /[ \t\n\r\v]+/
# Identifier: a letter or underscore followed by letters, digits or
# underscores.
token id /[a-zA-Z_][a-zA-Z0-9_]*/
# On '(' the block below runs instead of emitting the token as-is:
# parse_stop reads a nested_comment nonterminal from `input` (stopping
# as soon as the nonterminal is complete) and the comment is discarded,
# echoing what was consumed.
token open_paren /'('/
{
parse_stop NC: nested_comment( input )
print( 'discarding: ' NC '\n' )
}
}
#
# Token translation
#
# Lexical region used while scanning the inside of a nested comment:
# only the paren literals and runs of non-paren text are recognized.
lex nc_scan
{
literal '(', ')'
# Any run of characters that contains no parentheses.
token nc_data /[^()]+/
}
# One element inside a nested comment: either plain text or a
# recursively nested comment.
def nc_item
[nc_data]
| [nested_comment]
# A nested comment: a parenthesized, possibly empty sequence of items,
# which themselves may contain further nested comments.
def nested_comment
['(' nc_item* ')']
def nested [id*]
# Global accumulator parser: text is streamed into it with `send` from
# the reduction actions below and finalized with finish() at the end.
global NestedParser: accum<nested> =
cons accum<nested>[]
# Lexical region and grammar for the main input read from stdin.
lex two
{
# Spaces and tabs between tokens are skipped; '\n' is significant.
ignore /[ \t]+/
# NOTE(review): `word` matches a subset of what `stuff` matches (stuff
# additionally allows parens and spaces) — presumably the overlap is
# resolved by the parser trying the `item` alternatives in order;
# confirm against Colm's token-selection rules.
token word /[a-zA-Z0-9/*+_\-]+/
token stuff /[a-zA-Z0-9()/*+_\- ]+/
literal '!', ';', '\n'
# A1/A2 are empty productions whose only effect is the print in their
# reduction action, showing which alternative of `two` was committed.
def A1 []
{ print( "A1\n" ) }
def A2 []
{ print( "A2\n" ) }
# Each matched item is streamed into the global NestedParser,
# surrounded by single spaces; $r1 is the text of the production's
# first right-hand-side element.
def item
[word]
{
send NestedParser [' ']
send NestedParser [$r1]
send NestedParser [' ']
}
|
[stuff]
{
send NestedParser [' ']
send NestedParser [$r1]
send NestedParser [' ']
}
# The two forms of input: items terminated by '!' or by ';', each
# followed by a newline.
def two
[A1 item* '!' '\n']
|
[A2 item* ';' '\n']
}
# Driver: parse stdin with the `two` grammar (reduction actions feed
# NestedParser as a side effect), then finalize the accumulator and
# print both resulting trees.
parse TwoP: two[ stdin ]
Two: two = TwoP.tree
# finish() must run after the parse above, since the reduction actions
# are what send text into NestedParser.
Nested: nested = NestedParser.finish()
print( '\n------------\n' )
print( ^Nested '\n' )
print( ^Two '\n' )
|