author    Adrian Thurston <thurston@complang.org>  2012-12-19 18:07:28 -0500
committer Adrian Thurston <thurston@complang.org>  2012-12-19 18:07:28 -0500
commit    02e14456d8ad8dfcd9632399b9fec7c6135a1562 (patch)
tree      b6b58a80ae9f6d1583f32a86ab89c342f89731f1
parent    97eef93ee9abfe10be71f873877e898d455146c6 (diff)
download  colm-02e14456d8ad8dfcd9632399b9fec7c6135a1562.tar.gz
update test cases for changes in parent commit
-rw-r--r--  test/accum2.lm      |  49
-rw-r--r--  test/accumbt2.lm    |  67
-rw-r--r--  test/accumbt3.lm    |  76
-rw-r--r--  test/binary1.lm     | 793
-rw-r--r--  test/btscan1.lm     |  16
-rw-r--r--  test/btscan2.lm     |  10
-rw-r--r--  test/context1.lm    |   3
-rw-r--r--  test/context2.lm    |   5
-rw-r--r--  test/context3.lm    |   5
-rw-r--r--  test/counting1.lm   |   3
-rw-r--r--  test/counting3.lm   |   5
-rw-r--r--  test/counting4.lm   |   3
-rw-r--r--  test/generate1.lm   |   3
-rw-r--r--  test/generate2.lm   |   4
-rw-r--r--  test/heredoc.lm     |   3
-rw-r--r--  test/lookup1.lm     |   3
-rw-r--r--  test/maxlen.lm      |   3
-rw-r--r--  test/rubyhere.lm    |   3
-rw-r--r--  test/superid.lm     |   3
-rw-r--r--  test/tags1.lm       |   3
-rw-r--r--  test/tags3.lm       |   4
-rw-r--r--  test/tags4.lm       |   4
-rw-r--r--  test/translate2.lm  |   5
-rw-r--r--  test/undofrag1.lm   |   5
-rw-r--r--  test/undofrag2.lm   |   5
-rw-r--r--  test/undofrag3.lm   |   5
26 files changed, 550 insertions(+), 538 deletions(-)
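
Note: the parent commit (97eef93e) drops the brace-delimited bodies of `context` and `namespace` blocks in favour of an explicit `end <name>` terminator; the hunks below simply move the tests to the new form. A trimmed before/after sketch of the change, with illustrative names rather than code taken verbatim from any one test file:

    # Old form: body delimited by braces, contents indented.
    context ctx
    {
        i: int

        def start
            [item*]
    }

    # New form: body at top level, closed by an explicit end marker.
    context ctx

    i: int

    def start
        [item*]

    end ctx

The same pattern applies to `namespace` blocks (see btscan1.lm and btscan2.lm below), whose bodies now close with `end r1` / `end r2`.
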
diff --git a/test/accum2.lm b/test/accum2.lm
index 7f6bb2cb..58729f91 100644
--- a/test/accum2.lm
+++ b/test/accum2.lm
@@ -1,28 +1,29 @@
context ctx
-{
- i: int
- j: int
- k: int
-
- lex
- ignore /space+/
- literal '*', '(', ')'
- token id /[a-zA-Z_]+/
- end
-
- def foo [id]
-
- def item
- [id]
- | [foo]
- | ['(' item* ')']
- {
- i = 0
- }
-
- def start
- [item*]
-}
+
+i: int
+j: int
+k: int
+
+lex
+ ignore /space+/
+ literal '*', '(', ')'
+ token id /[a-zA-Z_]+/
+end
+
+def foo [id]
+
+def item
+ [id]
+| [foo]
+| ['(' item* ')']
+ {
+ i = 0
+ }
+
+def start
+ [item*]
+
+end ctx
cons SP: parser<ctx::start> []
diff --git a/test/accumbt2.lm b/test/accumbt2.lm
index 40bd2bbd..529bf849 100644
--- a/test/accumbt2.lm
+++ b/test/accumbt2.lm
@@ -1,37 +1,38 @@
context accum_bt
-{
- lex
- ignore /[ \t\n]+/
- token id1 /[a-zA-Z_][a-zA-Z_0-9]*/
-
- def one [ id1* ]
- end
-
- OneParser: accum<one>
-
- lex
- ignore /[ \t]+/
- token id2 /[a-zA-Z_][a-zA-Z_0-9]*/
- literal '!', ';', '\n'
-
- def A1 []
- { print( "A1\n" ) }
-
- def A2 []
- { print( "A2\n" ) }
-
- def item2
- [id2]
- {
- send OneParser [' extra ']
- send OneParser [$r1]
- }
-
- def two
- [A1 item2* '!' '\n']
- | [A2 item2* ';' '\n']
- end
-}
+
+lex
+ ignore /[ \t\n]+/
+ token id1 /[a-zA-Z_][a-zA-Z_0-9]*/
+
+ def one [ id1* ]
+end
+
+OneParser: accum<one>
+
+lex
+ ignore /[ \t]+/
+ token id2 /[a-zA-Z_][a-zA-Z_0-9]*/
+ literal '!', ';', '\n'
+
+ def A1 []
+ { print( "A1\n" ) }
+
+ def A2 []
+ { print( "A2\n" ) }
+
+ def item2
+ [id2]
+ {
+ send OneParser [' extra ']
+ send OneParser [$r1]
+ }
+
+ def two
+ [A1 item2* '!' '\n']
+ | [A2 item2* ';' '\n']
+end
+
+end accum_bt
AccumBt: accum_bt = cons accum_bt[]
AccumBt.OneParser = cons parser<accum_bt::one>[]
diff --git a/test/accumbt3.lm b/test/accumbt3.lm
index 7046cbf5..18e1de3f 100644
--- a/test/accumbt3.lm
+++ b/test/accumbt3.lm
@@ -36,43 +36,47 @@ def nested_comment
def nested [id*]
+#
+# Accumulator.
+#
context accum_bt
-{
- NestedParser: accum<nested>
-
- lex
- ignore /[ \t]+/
- token word /[a-zA-Z0-9/*+_\-]+/
- token stuff /[a-zA-Z0-9()/*+_\- ]+/
- literal '!', ';', '\n'
-
- def A1 []
- { print( "A1\n" ) }
-
- def A2 []
- { print( "A2\n" ) }
-
- def item
- [word]
- {
- send NestedParser [' ']
- send NestedParser [$r1]
- send NestedParser [' ']
- }
- |
- [stuff]
- {
- send NestedParser [' ']
- send NestedParser [$r1]
- send NestedParser [' ']
- }
-
- def two
- [A1 item* '!' '\n']
- |
- [A2 item* ';' '\n']
- end
-}
+
+NestedParser: accum<nested>
+
+lex
+ ignore /[ \t]+/
+ token word /[a-zA-Z0-9/*+_\-]+/
+ token stuff /[a-zA-Z0-9()/*+_\- ]+/
+ literal '!', ';', '\n'
+
+end
+
+def A1 []
+ { print( "A1\n" ) }
+
+def A2 []
+ { print( "A2\n" ) }
+
+def item
+ [word]
+ {
+ send NestedParser [' ']
+ send NestedParser [$r1]
+ send NestedParser [' ']
+ }
+|
+ [stuff]
+ {
+ send NestedParser [' ']
+ send NestedParser [$r1]
+ send NestedParser [' ']
+ }
+
+def two
+ [A1 item* '!' '\n']
+| [A2 item* ';' '\n']
+
+end accum_bt
cons AccumBt: accum_bt[]
AccumBt.NestedParser = cons parser<nested>[]
diff --git a/test/binary1.lm b/test/binary1.lm
index c025abb5..68a7f23d 100644
--- a/test/binary1.lm
+++ b/test/binary1.lm
@@ -1,488 +1,489 @@
context binary
-{
- # Used for most of the grammar.
- token octet /any/
-
- # Filled in during the parsing of resource records. Determine what RR_UNKNOWN
- # translates to.
- rr_type_value: int
- rr_class_value: int
-
- # Tokens generated from RR_UNKNOWN. Used to pick the kind
- # of resource record to attempt to parse.
- token RR_A // # 1 a host address
- token RR_NS // # 2 an authoritative name server
- token RR_MD // # 3 a mail destination (Obsolete - use MX)
- token RR_MF // # 4 a mail forwarder (Obsolete - use MX)
- token RR_CNAME // # 5 the canonical name for an alias
- token RR_SOA // # 6 marks the start of a zone of authority
- token RR_MB // # 7 a mailbox domain name (EXPERIMENTAL)
- token RR_MG // # 8 a mail group member (EXPERIMENTAL)
- token RR_MR // # 9 a mail rename domain name (EXPERIMENTAL)
- token RR_NULL // # 10 a null RR (EXPERIMENTAL)
- token RR_WKS // # 11 a well known service description
- token RR_PTR // # 12 a domain name pointer
- token RR_HINFO // # 13 host information
- token RR_MINFO // # 14 mailbox or mail list information
- token RR_MX // # 15 mail exchange
- token RR_TXT // # 16 text strings
-
- token RR_UNKNOWN
- /''/
- {
- id: int = typeid<RR_UNKNOWN>
- if rr_type_value == 1
- id = typeid<RR_A>
- elsif rr_type_value == 2
- id = typeid<RR_NS>
- elsif rr_type_value == 5
- id = typeid<RR_CNAME>
- elsif rr_type_value == 12
- id = typeid<RR_PTR>
- elsif rr_type_value == 15
- id = typeid<RR_MX>
- elsif rr_type_value == 16
- id = typeid<RR_TXT>
-
- input.push( make_token( id '' ) )
- }
- # Convert two octets in network order into an unsigned 16 bit value.
- int network_uord16( o1: octet o2: octet )
+# Used for most of the grammar.
+token octet /any/
+
+# Filled in during the parsing of resource records. Determine what RR_UNKNOWN
+# translates to.
+rr_type_value: int
+rr_class_value: int
+
+# Tokens generated from RR_UNKNOWN. Used to pick the kind
+# of resource record to attempt to parse.
+token RR_A // # 1 a host address
+token RR_NS // # 2 an authoritative name server
+token RR_MD // # 3 a mail destination (Obsolete - use MX)
+token RR_MF // # 4 a mail forwarder (Obsolete - use MX)
+token RR_CNAME // # 5 the canonical name for an alias
+token RR_SOA // # 6 marks the start of a zone of authority
+token RR_MB // # 7 a mailbox domain name (EXPERIMENTAL)
+token RR_MG // # 8 a mail group member (EXPERIMENTAL)
+token RR_MR // # 9 a mail rename domain name (EXPERIMENTAL)
+token RR_NULL // # 10 a null RR (EXPERIMENTAL)
+token RR_WKS // # 11 a well known service description
+token RR_PTR // # 12 a domain name pointer
+token RR_HINFO // # 13 host information
+token RR_MINFO // # 14 mailbox or mail list information
+token RR_MX // # 15 mail exchange
+token RR_TXT // # 16 text strings
+
+token RR_UNKNOWN
+ /''/
{
- return o1.data.uord8() * 256 + o2.data.uord8()
+ id: int = typeid<RR_UNKNOWN>
+ if rr_type_value == 1
+ id = typeid<RR_A>
+ elsif rr_type_value == 2
+ id = typeid<RR_NS>
+ elsif rr_type_value == 5
+ id = typeid<RR_CNAME>
+ elsif rr_type_value == 12
+ id = typeid<RR_PTR>
+ elsif rr_type_value == 15
+ id = typeid<RR_MX>
+ elsif rr_type_value == 16
+ id = typeid<RR_TXT>
+
+ input.push( make_token( id '' ) )
}
+# Convert two octets in network order into an unsigned 16 bit value.
+int network_uord16( o1: octet o2: octet )
+{
+ return o1.data.uord8() * 256 + o2.data.uord8()
+}
- def message
- [header questions answers authorities additionals]
-
- question_count: int
- answer_count: int
- authority_count: int
- additional_count: int
-
- # Message Header
- #
- # 1 1 1 1 1 1
- # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- # | ID |
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- # |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- # | QDCOUNT |
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- # | ANCOUNT |
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- # | NSCOUNT |
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- # | ARCOUNT |
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- def header
- [header_id header_fields count count count count]
- {
- question_count = r3.count
- answer_count = r4.count
- authority_count = r5.count
- additional_count = r6.count
- }
- def header_id
- [octet octet]
+def message
+ [header questions answers authorities additionals]
+
+question_count: int
+answer_count: int
+authority_count: int
+additional_count: int
+
+# Message Header
+#
+# 1 1 1 1 1 1
+# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# | ID |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# | QDCOUNT |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# | ANCOUNT |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# | NSCOUNT |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# | ARCOUNT |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+def header
+ [header_id header_fields count count count count]
+ {
+ question_count = r3.count
+ answer_count = r4.count
+ authority_count = r5.count
+ additional_count = r6.count
+ }
- def header_fields
- [octet octet]
+def header_id
+ [octet octet]
- def count
- count: int
- [octet octet]
- {
- lhs.count = network_uord16( r1 r2 )
- }
+def header_fields
+ [octet octet]
- #
- # Counting Primitives
- #
- # Uses a stack of lengths. Using a stack allows for counted lists to be
- # nested. As the list is consumed it brings the count down to zero. To use
- # it, push a new count value to the list and include it in a
- # right-recursive list like so:
- #
- # def LIST
- # [count_inc ITEM LIST]
- # [count_end]
- # end
- #
-
- CL: list<int>
-
- int start_list( count: int )
+def count
+ count: int
+ [octet octet]
{
- CL.push( count )
+ lhs.count = network_uord16( r1 r2 )
}
- def count_inc
- []
- {
- if CL.top == 0 {
- reject
- } else {
- CL.top = CL.top - 1
- }
+#
+# Counting Primitives
+#
+# Uses a stack of lengths. Using a stack allows for counted lists to be
+# nested. As the list is consumed it brings the count down to zero. To use
+# it, push a new count value to the list and include it in a
+# right-recursive list like so:
+#
+# def LIST
+# [count_inc ITEM LIST]
+# [count_end]
+# end
+#
+
+CL: list<int>
+
+int start_list( count: int )
+{
+ CL.push( count )
+}
+
+def count_inc
+ []
+ {
+ if CL.top == 0 {
+ reject
+ } else {
+ CL.top = CL.top - 1
}
+ }
- def count_end
- []
- {
- if CL.top != 0 {
- reject
- } else {
- CL.pop()
- }
+def count_end
+ []
+ {
+ if CL.top != 0 {
+ reject
+ } else {
+ CL.pop()
}
+ }
- #
- # Octet List
- #
+#
+# Octet List
+#
- # General octet list. Length must be set to use this.
- def octet_list
- [count_inc octet octet_list]
- | [count_end]
+# General octet list. Length must be set to use this.
+def octet_list
+ [count_inc octet octet_list]
+| [count_end]
- #
- # Names
- #
+#
+# Names
+#
- def name
- [name_part* name_end]
+def name
+ [name_part* name_end]
- # Name part lists are terminated by a zero length or a pointer.
- def name_end
- # Zero length ending
- [octet]
- {
- val: int = r1.data.uord8()
- if val != 0 {
- reject
- }
+# Name part lists are terminated by a zero length or a pointer.
+def name_end
+ # Zero length ending
+ [octet]
+ {
+ val: int = r1.data.uord8()
+ if val != 0 {
+ reject
}
+ }
- # Pointer ending
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- # | 1 1| OFFSET |
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- | [octet octet]
- {
- val: int = r1.data.uord8()
- if val < 64 {
- reject
- }
+ # Pointer ending
+ # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+ # | 1 1| OFFSET |
+ # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+| [octet octet]
+ {
+ val: int = r1.data.uord8()
+ if val < 64 {
+ reject
}
+ }
- #
- # Get some number of bytes.
- #
-
- # How many to get
- nbytes: int
-
- # We use this token to eliminate the lookahead that would be needed to
- # cause a reduce of part_len. This forces whatever comes before nbytes to
- # be reduced before nbytes_data token is fetched from the scanner. We need
- # this because nbytes_data depends on the nbytes in the context and we need
- # to ensure that it is set.
- token nb_empty /''/
-
- # Fetch nbytes bytes.
- token nbytes_data
- /''/
- {
- input.push( make_token( typeid<nbytes_data> input.pull(nbytes) ) )
- }
+#
+# Get some number of bytes.
+#
- def nbytes
- [nb_empty nbytes_data]
+# How many to get
+nbytes: int
- def name_part
- [part_len nbytes]
+# We use this token to eliminate the lookahead that would be needed to
+# cause a reduce of part_len. This forces whatever comes before nbytes to
+# be reduced before nbytes_data token is fetched from the scanner. We need
+# this because nbytes_data depends on the nbytes in the context and we need
+# to ensure that it is set.
+token nb_empty /''/
+# Fetch nbytes bytes.
+token nbytes_data
+ /''/
+ {
+ input.push( make_token( typeid<nbytes_data> input.pull(nbytes) ) )
+ }
- def part_len
- [octet]
- {
- # A name part list is terminated either by a zero length or a pointer,
- # which must have the two high bits set.
- count: int = r1.data.uord8()
- if count == 0 || count >= 64 {
- reject
- } else {
- # Set the number of bytes to get for the name part.
- nbytes = count
- }
+def nbytes
+ [nb_empty nbytes_data]
+
+def name_part
+ [part_len nbytes]
+
+
+def part_len
+ [octet]
+ {
+ # A name part list is terminated either by a zero length or a pointer,
+ # which must have the two high bits set.
+ count: int = r1.data.uord8()
+ if count == 0 || count >= 64 {
+ reject
+ } else {
+ # Set the number of bytes to get for the name part.
+ nbytes = count
}
+ }
- #
- # Resource Records
- #
+#
+# Resource Records
+#
+
+# 1 1 1 1 1 1
+# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# | |
+# / /
+# / NAME /
+# | |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# | TYPE |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# | CLASS |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# | TTL |
+# | |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# | RDLENGTH |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
+# / RDATA /
+# / /
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+def resource_record
+ [name rr_type rr_class ttl rdlength rdata]
+
+def rr_type
+ [octet octet]
+ {
+ rr_type_value = network_uord16( r1 r2 )
+ }
- # 1 1 1 1 1 1
- # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- # | |
- # / /
- # / NAME /
- # | |
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- # | TYPE |
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- # | CLASS |
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- # | TTL |
- # | |
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- # | RDLENGTH |
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
- # / RDATA /
- # / /
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+def rr_class
+ value: int
+ [octet octet]
+ {
+ rr_class_value = network_uord16( r1 r2 )
+ }
- def resource_record
- [name rr_type rr_class ttl rdlength rdata]
+def ttl
+ [octet octet octet octet]
- def rr_type
- [octet octet]
- {
- rr_type_value = network_uord16( r1 r2 )
- }
+token rdata_bytes
+ /''/
+ {
+ input.push( make_token( typeid<rdata_bytes> input.pull(rdata_length) ) )
+ }
- def rr_class
- value: int
- [octet octet]
- {
- rr_class_value = network_uord16( r1 r2 )
- }
+def rdlength
+ [octet octet]
+ {
+ rdata_length = network_uord16( r1 r2 )
+ }
- def ttl
- [octet octet octet octet]
+rdata_length: int
+
+def rdata
+ [RR_UNKNOWN rdata_bytes]
+| [RR_A address]
+| [RR_NS name]
+| [RR_CNAME name]
+| [RR_PTR name]
+| [RR_MX octet octet name]
+| [RR_TXT rdata_bytes]
- token rdata_bytes
- /''/
- {
- input.push( make_token( typeid<rdata_bytes> input.pull(rdata_length) ) )
- }
- def rdlength
- [octet octet]
- {
- rdata_length = network_uord16( r1 r2 )
- }
+#
+# Address
+#
+def address [octet octet octet octet]
- rdata_length: int
-
- def rdata
- [RR_UNKNOWN rdata_bytes]
- | [RR_A address]
- | [RR_NS name]
- | [RR_CNAME name]
- | [RR_PTR name]
- | [RR_MX octet octet name]
- | [RR_TXT rdata_bytes]
-
-
- #
- # Address
- #
- def address [octet octet octet octet]
-
- #
- # List of Questions
- #
-
- def questions
- [load_question_count question_list]
-
- def load_question_count
- []
- {
- start_list( question_count )
- }
+#
+# List of Questions
+#
- def question_list
- [count_inc question question_list]
- | [count_end]
+def questions
+ [load_question_count question_list]
- #
- # Question
- #
+def load_question_count
+ []
+ {
+ start_list( question_count )
+ }
- # 1 1 1 1 1 1
- # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- # | |
- # / QNAME /
- # / /
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- # | QTYPE |
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- # | QCLASS |
- # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+def question_list
+ [count_inc question question_list]
+| [count_end]
- def question
- [name qtype qclass]
+#
+# Question
+#
- def qtype
- [octet octet]
+# 1 1 1 1 1 1
+# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# | |
+# / QNAME /
+# / /
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# | QTYPE |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+# | QCLASS |
+# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
- def qclass
- [octet octet]
+def question
+ [name qtype qclass]
- #
- # List of Answers
- #
+def qtype
+ [octet octet]
- def answers
- [load_answer_count answer_list]
+def qclass
+ [octet octet]
- def load_answer_count
- []
- {
- start_list( answer_count )
- }
+#
+# List of Answers
+#
- def answer_list
- [count_inc answer answer_list]
- | [count_end]
+def answers
+ [load_answer_count answer_list]
- #
- # Answer
- #
+def load_answer_count
+ []
+ {
+ start_list( answer_count )
+ }
- def answer
- [resource_record]
+def answer_list
+ [count_inc answer answer_list]
+| [count_end]
- #
- # List of Authorities
- #
+#
+# Answer
+#
- def authorities
- [load_authority_count authority_list]
+def answer
+ [resource_record]
- def load_authority_count
- []
- {
- start_list( authority_count )
- }
+#
+# List of Authorities
+#
- def authority_list
- [count_inc authority authority_list]
- | [count_end]
+def authorities
+ [load_authority_count authority_list]
- #
- # Authority
- #
+def load_authority_count
+ []
+ {
+ start_list( authority_count )
+ }
- def authority
- [resource_record]
+def authority_list
+ [count_inc authority authority_list]
+| [count_end]
- #
- # List of Additionals
- #
+#
+# Authority
+#
- def additionals
- [load_additional_count additional_list]
+def authority
+ [resource_record]
- def load_additional_count
- []
- {
- start_list( additional_count )
- }
+#
+# List of Additionals
+#
+
+def additionals
+ [load_additional_count additional_list]
- def additional_list
- [count_inc additional additional_list]
- | [count_end]
+def load_additional_count
+ []
+ {
+ start_list( additional_count )
+ }
- #
- # Additional
- #
+def additional_list
+ [count_inc additional additional_list]
+| [count_end]
- def additional
- [resource_record]
+#
+# Additional
+#
+def additional
+ [resource_record]
- def start
- [message*]
- #
- # Grammar End.
- #
+def start
+ [message*]
- int print_RR_UNKNOWN( s: start )
- {
- for I:rdata in s {
- if match I [u:RR_UNKNOWN rdata_bytes] {
- print( 'UNKNOWN TYPE\n' )
- }
+#
+# Grammar End.
+#
+
+int print_RR_UNKNOWN( s: start )
+{
+ for I:rdata in s {
+ if match I [u:RR_UNKNOWN rdata_bytes] {
+ print( 'UNKNOWN TYPE\n' )
}
}
+}
- int print_RR_A( s: start )
- {
- for I:rdata in s {
- if match I [RR_A o1:octet o2:octet o3:octet o4:octet] {
- print( 'RR_A: ' o1.data.uord8() '.' o2.data.uord8() '.'
- o3.data.uord8() '.' o4.data.uord8() '\n' )
- }
+int print_RR_A( s: start )
+{
+ for I:rdata in s {
+ if match I [RR_A o1:octet o2:octet o3:octet o4:octet] {
+ print( 'RR_A: ' o1.data.uord8() '.' o2.data.uord8() '.'
+ o3.data.uord8() '.' o4.data.uord8() '\n' )
}
}
+}
- int print_name( n: name m: map<int name> )
- {
- for P: name_part in n {
- match P [part_len D:nbytes]
- print( D '.' )
- }
+int print_name( n: name m: map<int name> )
+{
+ for P: name_part in n {
+ match P [part_len D:nbytes]
+ print( D '.' )
+ }
- for E:name_end in n {
- if match E [o1:octet o2:octet] {
- val: int = (o1.data.uord8() - 192) * 256 + o2.data.uord8()
- print( '[' val ']' )
- nameInMap: name = m.find( val )
- print_name( nameInMap m )
- }
+ for E:name_end in n {
+ if match E [o1:octet o2:octet] {
+ val: int = (o1.data.uord8() - 192) * 256 + o2.data.uord8()
+ print( '[' val ']' )
+ nameInMap: name = m.find( val )
+ print_name( nameInMap m )
}
}
+}
- int print_all_names( s: start )
- {
- for M: message in s {
- construct m: map<int name> []
+int print_all_names( s: start )
+{
+ for M: message in s {
+ construct m: map<int name> []
- O: octet = octet in M
+ O: octet = octet in M
- for N: name in M {
- match N [name_part* E:name_end]
+ for N: name in M {
+ match N [name_part* E:name_end]
- for NP: name_part* in N {
- if match NP [L: octet nbytes name_part*] {
- messageOffset: int = L.pos - O.pos
- construct n: name [NP E]
- m.insert( messageOffset n )
- }
+ for NP: name_part* in N {
+ if match NP [L: octet nbytes name_part*] {
+ messageOffset: int = L.pos - O.pos
+ construct n: name [NP E]
+ m.insert( messageOffset n )
}
}
+ }
- for I: name in M {
- print_name( I m )
- print( '\n' )
- }
+ for I: name in M {
+ print_name( I m )
+ print( '\n' )
}
}
}
+end binary
+
cons Binary: binary[]
Binary.CL = cons list<int> []
diff --git a/test/btscan1.lm b/test/btscan1.lm
index 041462eb..6afa707b 100644
--- a/test/btscan1.lm
+++ b/test/btscan1.lm
@@ -1,15 +1,22 @@
+#
+# R1
+#
namespace r1
-{
+
lex
literal '!', 'a', 'b'
ignore /[ \n\t]+/
end
def line [ '!' 'a' 'b' 'b' 'a']
-}
+end r1
+
+#
+# R2
+#
namespace r2
-{
+
lex
literal '!'
token id /[a-zA-Z_]+/
@@ -17,7 +24,8 @@ namespace r2
end
def line [ '!' id ]
-}
+
+end r2
def item
[r1::line]
diff --git a/test/btscan2.lm b/test/btscan2.lm
index 1dfc8ec1..bcad2f66 100644
--- a/test/btscan2.lm
+++ b/test/btscan2.lm
@@ -1,15 +1,16 @@
namespace r1
-{
+
lex
literal '!', 'a', 'b'
ignore /[ \n\t]+/
end
def line [ '!' 'a' 'b' 'b' 'a']
-}
+
+end r1
namespace r2
-{
+
lex
literal '!'
token id /[a-zA-Z_]+/
@@ -17,7 +18,8 @@ namespace r2
end
def line [ '!' id ]
-}
+
+end r2
def item
[r1::line]
diff --git a/test/context1.lm b/test/context1.lm
index 919c6157..121390cc 100644
--- a/test/context1.lm
+++ b/test/context1.lm
@@ -1,5 +1,4 @@
context ctx
-{
i: int
j: int
k: int
@@ -25,7 +24,7 @@ context ctx
def start
[item*]
-}
+end ctx
CTX: ctx = cons ctx[]
parse InputP: ctx::start( CTX ) [ stdin ]
diff --git a/test/context2.lm b/test/context2.lm
index 48371f3b..8ff3feb0 100644
--- a/test/context2.lm
+++ b/test/context2.lm
@@ -1,5 +1,5 @@
context ruby_here
-{
+
rl ident_pattern /[a-zA-Z_][a-zA-Z_0-9]*/
rl number_pattern /[0-9]+/
@@ -86,7 +86,8 @@ context ruby_here
def start
[item*]
-}
+
+end ruby_here
CTX: ruby_here = cons ruby_here []
diff --git a/test/context3.lm b/test/context3.lm
index 41f8de0b..834a1639 100644
--- a/test/context3.lm
+++ b/test/context3.lm
@@ -1,5 +1,5 @@
context ctx
-{
+
i: int
j: int
k: int
@@ -32,7 +32,8 @@ context ctx
def start
[item*]
-}
+
+end ctx
CTX: ctx = cons ctx []
parse InputP: ctx::start( CTX ) [stdin]
diff --git a/test/counting1.lm b/test/counting1.lm
index 9d73a4fd..9f9ca974 100644
--- a/test/counting1.lm
+++ b/test/counting1.lm
@@ -1,5 +1,4 @@
context counting
-{
#
# Regular Definitions
@@ -89,7 +88,7 @@ context counting
}
}
}
-}
+end counting
cons Counting: counting[]
parse counting::start(Counting)[ stdin ]
diff --git a/test/counting3.lm b/test/counting3.lm
index ada6c1d9..b40a0952 100644
--- a/test/counting3.lm
+++ b/test/counting3.lm
@@ -1,5 +1,5 @@
context counting
-{
+
#
# Regular Definitions
#
@@ -89,7 +89,8 @@ context counting
}
print( '*** SUCCESS ***\n' )
}
-}
+
+end counting
cons Counting: counting[]
parse counting::start(Counting)[ stdin ]
diff --git a/test/counting4.lm b/test/counting4.lm
index ea0f1ac9..868dc73b 100644
--- a/test/counting4.lm
+++ b/test/counting4.lm
@@ -1,5 +1,4 @@
context counting
-{
#
# Regular Definitions
@@ -86,7 +85,7 @@ context counting
}
print( '*** SUCCESS ***\n' )
}
-}
+end counting
cons Counting: counting[]
parse counting::start(Counting)[stdin]
diff --git a/test/generate1.lm b/test/generate1.lm
index 20869b14..81f82b6e 100644
--- a/test/generate1.lm
+++ b/test/generate1.lm
@@ -1,5 +1,4 @@
context generate
-{
# Regular definitions
rl ident_char /[a-zA-Z_]/
@@ -683,7 +682,7 @@ context generate
def keyword_item
[identifier '=' expression]
-}
+end generate
int print_stmts( S: generate::start )
{
diff --git a/test/generate2.lm b/test/generate2.lm
index 3d89a6e5..137b678e 100644
--- a/test/generate2.lm
+++ b/test/generate2.lm
@@ -1,5 +1,4 @@
context generate
-{
def open_item
type: str
num: int
@@ -188,7 +187,8 @@ context generate
def start
[item*]
-}
+
+end generate
cons Generate: generate[]
diff --git a/test/heredoc.lm b/test/heredoc.lm
index 23a8b114..57b42b20 100644
--- a/test/heredoc.lm
+++ b/test/heredoc.lm
@@ -1,5 +1,4 @@
context heredoc
-{
rl ident_char /[a-zA-Z_]/
lex
@@ -41,7 +40,7 @@ context heredoc
def start
[here_name here_data here_close id nl]
-}
+end heredoc
cons HereDoc: heredoc[]
diff --git a/test/lookup1.lm b/test/lookup1.lm
index a6fcd4dd..afdfda4d 100644
--- a/test/lookup1.lm
+++ b/test/lookup1.lm
@@ -1,5 +1,4 @@
context lookup
-{
#
# Data types for global data.
#
@@ -2139,7 +2138,7 @@ context lookup
print( '\n' )
}
-}
+end lookup
#
# Global data declarations
diff --git a/test/maxlen.lm b/test/maxlen.lm
index 4291bc17..b7f8c250 100644
--- a/test/maxlen.lm
+++ b/test/maxlen.lm
@@ -1,6 +1,5 @@
context maxlen
-{
#
# Regular Definitions
@@ -43,7 +42,7 @@ context maxlen
def start
[restricted_list id*]
-}
+end maxlen
cons MaxLen: maxlen[]
MaxLen.allow = 3
diff --git a/test/rubyhere.lm b/test/rubyhere.lm
index 39fc9848..263091d5 100644
--- a/test/rubyhere.lm
+++ b/test/rubyhere.lm
@@ -1,5 +1,4 @@
context rubyhere
-{
rl ident_pattern /[a-zA-Z_][a-zA-Z_0-9]*/
rl number_pattern /[0-9]+/
@@ -86,7 +85,7 @@ context rubyhere
def start
[item*]
-}
+end rubyhere
cons RubyHere: rubyhere[]
diff --git a/test/superid.lm b/test/superid.lm
index 75bebe69..0e032c5c 100644
--- a/test/superid.lm
+++ b/test/superid.lm
@@ -1,5 +1,4 @@
context si
-{
lex
literal '!', 'a', ';\n'
@@ -56,7 +55,7 @@ context si
match lhs [Item2:item2 ';\n']
print( Item2.msg )
}
-}
+end si
cons SuperId: si[]
parse S: si::start(SuperId)[stdin]
diff --git a/test/tags1.lm b/test/tags1.lm
index 06edfe33..4c5f4f87 100644
--- a/test/tags1.lm
+++ b/test/tags1.lm
@@ -1,5 +1,4 @@
context tags
-{
# Open and close tags by rewriting to generic close tags. Won't work if
# interested in unclosed tags because a token can start as not close_id, but
# then become a close id during the course of parsing.
@@ -80,7 +79,7 @@ context tags
print_xml( lhs )
print( 'failed\n' )
}
-}
+end tags
cons Tags: tags[]
Tags.TS = cons tags::tag_stack ["sentinal"]
diff --git a/test/tags3.lm b/test/tags3.lm
index 75ac2ab3..1f7fe2f2 100644
--- a/test/tags3.lm
+++ b/test/tags3.lm
@@ -1,5 +1,5 @@
context tags
-{
+
#
# Regular Definitions
#
@@ -295,7 +295,7 @@ context tags
# }
# }
# }
-}
+end tags
cons Tags: tags[]
Tags.TagStack = construct tags::tag_stack []
diff --git a/test/tags4.lm b/test/tags4.lm
index 6983a574..53b3c868 100644
--- a/test/tags4.lm
+++ b/test/tags4.lm
@@ -6,8 +6,6 @@
#
context tags
-{
-
#
# Regular Definitions
#
@@ -261,7 +259,7 @@ context tags
{
return true
}
-}
+end tags
# Finds unclosed tags and puts the content after the tag. Afterwards
# all unclosed tags will be empty 'inside'.
diff --git a/test/translate2.lm b/test/translate2.lm
index 7d746e28..57d0ef19 100644
--- a/test/translate2.lm
+++ b/test/translate2.lm
@@ -12,7 +12,7 @@ def start2
[item2*]
context ctx
-{
+
lex
ignore /space+/
literal '*', '(', ')', '!', ';\n'
@@ -43,7 +43,8 @@ context ctx
def start
[A item* '!']
| [B item* ';\n']
-}
+
+end ctx
CTX: ctx = cons ctx []
parse InputP: ctx::start( CTX ) [ stdin ]
diff --git a/test/undofrag1.lm b/test/undofrag1.lm
index de0a7cd2..73b41e45 100644
--- a/test/undofrag1.lm
+++ b/test/undofrag1.lm
@@ -13,7 +13,7 @@ def start2
context ctx
-{
+
SP: parser<start2>
lex
@@ -40,7 +40,8 @@ context ctx
def start1
[A item* '!']
| [B item* ';\n']
-}
+
+end ctx
CTX: ctx = cons ctx []
diff --git a/test/undofrag2.lm b/test/undofrag2.lm
index f8659bae..66641af0 100644
--- a/test/undofrag2.lm
+++ b/test/undofrag2.lm
@@ -1,5 +1,5 @@
context undo
-{
+
lex
ignore /[ \t]+/
literal '*', '(', ')', '^', ';', '\n'
@@ -34,7 +34,8 @@ context undo
def start
[A1 item* '^']
| [A2 item* ';' '\n']
-}
+
+end undo
cons Undo: undo[]
Undo.Out = construct parser<undo::out> []
diff --git a/test/undofrag3.lm b/test/undofrag3.lm
index 9a39e98d..baef8f07 100644
--- a/test/undofrag3.lm
+++ b/test/undofrag3.lm
@@ -1,5 +1,5 @@
context undo
-{
+
lex
ignore /[ \t]+/
literal '*', '(', ')', '^', ';', '\n', '.'
@@ -39,7 +39,8 @@ context undo
def start
[A1 item* F '.' '^']
| [A2 item* F '.' ';' '\n']
-}
+
+end undo
cons Undo: undo[]
Undo.Out = construct parser<undo::out> []
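
Beyond the delimiter change, every test keeps its existing driver code: the context is instantiated with `cons` and then handed to `parse`, as in the unchanged trailing lines of the hunks above. A minimal sketch of that idiom under the new syntax, using illustrative names:

    context c

    lex
        ignore /space+/
        token id /[a-zA-Z_]+/
    end

    def start
        [id*]

    end c

    C: c = cons c []
    parse Input: c::start( C ) [ stdin ]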